author     Andrew Lunn <andrew@lunn.ch>    2015-01-19 16:00:15 -0600
committer  Andrew Lunn <andrew@lunn.ch>    2015-01-19 16:00:15 -0600
commit     fe6e91e3387e4f90ed4605a1a538248a31686c16
tree       d4e10d0d214ca73500fcbb3dbbd25027320fddda
parent     c6574542149dede8093e326d2c358ba447e30f33
parent     38bdf45f4aa5cb6186d50a29e6cbbd9d486a1519
Merge branch 'mvebu/fixes-3' into mvebu/soc
-rw-r--r--.mailmap1
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.txt10
-rw-r--r--Documentation/devicetree/bindings/input/stmpe-keypad.txt2
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile3
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts24
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/calls.S1
-rw-r--r--arch/arm/kernel/perf_regs.c8
-rw-r--r--arch/arm/kernel/setup.c9
-rw-r--r--arch/arm/kernel/smp.c12
-rw-r--r--arch/arm/mach-mvebu/coherency.c7
-rw-r--r--arch/arm/mm/dump.c9
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm64/configs/defconfig9
-rw-r--r--arch/arm64/include/asm/arch_timer.h1
-rw-r--r--arch/arm64/include/asm/cpu.h5
-rw-r--r--arch/arm64/include/asm/dma-mapping.h11
-rw-r--r--arch/arm64/include/asm/pgtable.h5
-rw-r--r--arch/arm64/include/asm/processor.h4
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/kernel/cpuinfo.c10
-rw-r--r--arch/arm64/kernel/efi.c2
-rw-r--r--arch/arm64/kernel/module.c1
-rw-r--r--arch/arm64/kernel/perf_regs.c8
-rw-r--r--arch/arm64/kernel/setup.c1
-rw-r--r--arch/arm64/kernel/smp_spin_table.c1
-rw-r--r--arch/arm64/kernel/suspend.c14
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c1
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/kernel/acpi.c9
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/nios2/kernel/cpuinfo.c1
-rw-r--r--arch/nios2/kernel/entry.S20
-rw-r--r--arch/parisc/include/asm/ldcw.h13
-rw-r--r--arch/powerpc/include/asm/kexec.h10
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c2
-rw-r--r--arch/powerpc/kernel/smp.c9
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c8
-rw-r--r--arch/um/Kconfig.common1
-rw-r--r--arch/x86/boot/Makefile1
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/aes_ctrby8_avx-x86_64.S46
-rw-r--r--arch/x86/include/asm/vgtod.h6
-rw-r--r--arch/x86/kernel/acpi/boot.c9
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c17
-rw-r--r--arch/x86/kernel/perf_regs.c90
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/vmx.c88
-rw-r--r--arch/x86/lib/insn.c2
-rw-r--r--arch/x86/mm/init.c37
-rw-r--r--arch/x86/um/sys_call_table_32.c2
-rw-r--r--arch/x86/um/sys_call_table_64.c2
-rw-r--r--arch/x86/vdso/vma.c45
-rw-r--r--crypto/af_alg.c3
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/acpi_processor.c25
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/processor_core.c56
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/scan.c13
-rw-r--r--drivers/acpi/video.c27
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/opp.c39
-rw-r--r--drivers/bus/mvebu-mbus.c13
-rw-r--r--drivers/char/agp/ali-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/agp/nvidia-agp.c2
-rw-r--r--drivers/char/agp/via-agp.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c46
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c4
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c11
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpuidle/governors/ladder.c7
-rw-r--r--drivers/cpuidle/governors/menu.c25
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c320
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c10
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h17
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c60
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c36
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c24
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c28
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c53
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c69
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/notify.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c65
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c48
-rw-r--r--drivers/gpu/drm/tegra/drm.c16
-rw-r--r--drivers/gpu/drm/tegra/gem.c52
-rw-r--r--drivers/hid/Kconfig3
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-kye.c4
-rw-r--r--drivers/hid/hid-logitech-dj.c16
-rw-r--r--drivers/hid/hid-logitech-hidpp.c41
-rw-r--r--drivers/hid/hid-roccat-pyra.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/input/evdev.c60
-rw-r--r--drivers/input/input.c22
-rw-r--r--drivers/input/keyboard/Kconfig1
-rw-r--r--drivers/input/keyboard/gpio_keys.c114
-rw-r--r--drivers/input/keyboard/hil_kbd.c6
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c141
-rw-r--r--drivers/input/mouse/alps.c84
-rw-r--r--drivers/input/mouse/trackpoint.c4
-rw-r--r--drivers/input/mouse/trackpoint.h5
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c99
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c4
-rw-r--r--drivers/iommu/intel-iommu.c12
-rw-r--r--drivers/iommu/ipmmu-vmsa.c6
-rw-r--r--drivers/iommu/rockchip-iommu.c1
-rw-r--r--drivers/md/dm-thin.c29
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/stmpe.h3
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c6
-rw-r--r--drivers/net/ethernet/Kconfig12
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c34
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c142
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c54
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c8
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c9
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/s6gmac.c1058
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c19
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c1
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/phy/micrel.c18
-rw-r--r--drivers/net/usb/qmi_wwan.c10
-rw-r--r--drivers/net/usb/r8152.c17
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c15
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c17
-rw-r--r--drivers/net/xen-netback/xenbus.c1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c57
-rw-r--r--drivers/pinctrl/pinctrl-st.c5
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/regulator/s2mps11.c19
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/spi/spi-img-spfi.c8
-rw-r--r--drivers/spi/spi-sh-msiof.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c2
-rw-r--r--drivers/thermal/cpu_cooling.c360
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c20
-rw-r--r--drivers/thermal/imx_thermal.c15
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c8
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/int340x_thermal/int3402_thermal.c1
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c4
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c309
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/rockchip_thermal.c1
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c12
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_core.c6
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c17
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c10
-rw-r--r--drivers/video/fbdev/core/fb_defio.c5
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_pll.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/pll.c3
-rw-r--r--drivers/video/fbdev/omap2/dss/sdi.c2
-rw-r--r--drivers/video/logo/logo.c17
-rw-r--r--drivers/virtio/virtio_pci_common.c10
-rw-r--r--drivers/virtio/virtio_pci_common.h1
-rw-r--r--drivers/virtio/virtio_pci_legacy.c12
-rw-r--r--fs/btrfs/backref.c13
-rw-r--r--fs/btrfs/delayed-inode.c8
-rw-r--r--fs/btrfs/extent-tree.c12
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/btrfs/scrub.c2
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/cifs/cifsglob.h6
-rw-r--r--fs/cifs/netmisc.c12
-rw-r--r--fs/cifs/readdir.c10
-rw-r--r--fs/cifs/smb2misc.c12
-rw-r--r--fs/cifs/smb2ops.c3
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/smb2transport.c2
-rw-r--r--fs/ext4/extents.c4
-rw-r--r--fs/ext4/file.c220
-rw-r--r--fs/ext4/resize.c24
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/fcntl.c5
-rw-r--r--fs/isofs/rock.c3
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/notify/fanotify/fanotify_user.c10
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c5
-rw-r--r--fs/ocfs2/namei.c43
-rw-r--r--fs/udf/dir.c31
-rw-r--r--fs/udf/inode.c14
-rw-r--r--fs/udf/namei.c17
-rw-r--r--fs/udf/symlink.c57
-rw-r--r--fs/udf/udfdecl.h3
-rw-r--r--fs/udf/unicode.c28
-rw-r--r--include/acpi/processor.h8
-rw-r--r--include/drm/drmP.h4
-rw-r--r--include/drm/drm_gem.h7
-rw-r--r--include/dt-bindings/thermal/thermal.h2
-rw-r--r--include/linux/acpi.h4
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/ceph/osd_client.h4
-rw-r--r--include/linux/cpu_cooling.h6
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/kdb.h62
-rw-r--r--include/linux/mfd/stmpe.h22
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/netdevice.h20
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/pagemap.h13
-rw-r--r--include/linux/perf_event.h12
-rw-r--r--include/linux/perf_regs.h16
-rw-r--r--include/linux/pm_domain.h8
-rw-r--r--include/linux/rmap.h10
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/net/genetlink.h9
-rw-r--r--include/net/mac80211.h7
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/vxlan.h28
-rw-r--r--include/sound/pcm.h10
-rw-r--r--include/uapi/asm-generic/fcntl.h2
-rw-r--r--include/uapi/linux/in6.h3
-rw-r--r--include/uapi/linux/kfd_ioctl.h37
-rw-r--r--include/uapi/linux/libc-compat.h3
-rw-r--r--include/uapi/linux/virtio_ring.h7
-rw-r--r--kernel/audit.c10
-rw-r--r--kernel/auditfilter.c23
-rw-r--r--kernel/auditsc.c49
-rw-r--r--kernel/debug/debug_core.c52
-rw-r--r--kernel/debug/kdb/kdb_bp.c37
-rw-r--r--kernel/debug/kdb/kdb_debugger.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c267
-rw-r--r--kernel/debug/kdb/kdb_private.h3
-rw-r--r--kernel/events/core.c19
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/locking/mutex-debug.c2
-rw-r--r--kernel/sched/core.c13
-rw-r--r--kernel/sched/deadline.c25
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/trace/trace_kdb.c4
-rw-r--r--lib/Kconfig.kgdb25
-rw-r--r--lib/assoc_array.c1
-rw-r--r--mm/Kconfig.debug9
-rw-r--r--mm/filemap.c29
-rw-r--r--mm/memcontrol.c17
-rw-r--r--mm/memory.c35
-rw-r--r--mm/mmap.c13
-rw-r--r--mm/page-writeback.c43
-rw-r--r--mm/rmap.c42
-rw-r--r--mm/vmscan.c24
-rw-r--r--net/batman-adv/fragmentation.c4
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/multicast.c11
-rw-r--r--net/batman-adv/network-coding.c2
-rw-r--r--net/batman-adv/originator.c7
-rw-r--r--net/batman-adv/routing.c6
-rw-r--r--net/bluetooth/6lowpan.c1
-rw-r--r--net/bluetooth/bnep/core.c3
-rw-r--r--net/bluetooth/cmtp/core.c3
-rw-r--r--net/bluetooth/hci_event.c16
-rw-r--r--net/bluetooth/hidp/core.c3
-rw-r--r--net/ceph/auth_x.c2
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/core/dev.c175
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/ipv4/geneve.c6
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv6/tcp_ipv6.c45
-rw-r--r--net/mac80211/key.c12
-rw-r--r--net/mpls/mpls_gso.c5
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netlink/af_netlink.c38
-rw-r--r--net/netlink/af_netlink.h8
-rw-r--r--net/netlink/genetlink.c56
-rw-r--r--net/openvswitch/actions.c3
-rw-r--r--net/openvswitch/datapath.c3
-rw-r--r--net/openvswitch/flow.c5
-rw-r--r--net/openvswitch/flow_netlink.c13
-rw-r--r--net/openvswitch/vport-geneve.c3
-rw-r--r--net/openvswitch/vport-gre.c18
-rw-r--r--net/openvswitch/vport-vxlan.c2
-rw-r--r--net/openvswitch/vport.c7
-rw-r--r--net/packet/af_packet.c11
-rw-r--r--net/sunrpc/xdr.c6
-rw-r--r--net/wireless/Kconfig2
-rw-r--r--scripts/Makefile.clean16
-rw-r--r--security/keys/gc.c4
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c2
-rw-r--r--sound/pci/hda/hda_controller.c24
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/hda_priv.h1
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/soc/codecs/rt5677.c9
-rw-r--r--sound/soc/dwc/designware_i2s.c49
-rw-r--r--sound/soc/intel/Kconfig4
-rw-r--r--sound/soc/intel/bytcr_dpcm_rt5640.c2
-rw-r--r--sound/soc/intel/sst-firmware.c6
-rw-r--r--sound/soc/intel/sst/sst_acpi.c2
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c4
-rw-r--r--sound/soc/rockchip/rockchip_i2s.h2
-rw-r--r--sound/soc/soc-core.c14
-rw-r--r--sound/usb/caiaq/audio.c2
-rw-r--r--tools/lib/lockdep/preload.c4
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-diff.c46
-rw-r--r--tools/perf/builtin-list.c13
-rw-r--r--tools/perf/builtin-report.c24
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/tests/hists_cumulate.c66
-rw-r--r--tools/perf/tests/hists_filter.c2
-rw-r--r--tools/perf/tests/hists_output.c10
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/ui/hist.c3
-rw-r--r--tools/perf/ui/tui/setup.c26
-rw-r--r--tools/perf/util/callchain.c30
-rw-r--r--tools/perf/util/callchain.h2
-rw-r--r--tools/perf/util/hist.c18
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/probe-event.c6
-rw-r--r--tools/perf/util/probe-finder.c18
-rw-r--r--tools/power/cpupower/utils/cpupower.c2
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c2
-rw-r--r--tools/testing/selftests/exec/execveat.c4
-rw-r--r--virt/kvm/kvm_main.c26
427 files changed, 4517 insertions, 3562 deletions
diff --git a/.mailmap b/.mailmap
index ada8ad696b2e..d357e1bd2a43 100644
--- a/.mailmap
+++ b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Jacob Shin <Jacob.Shin@amd.com>
James Bottomley <jejb@mulgrave.(none)>
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index a4a38fcf2ed6..44b705767aca 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -10,12 +10,13 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
+ - gpios: OF device-tree gpio specification.
+ - interrupts: the interrupt line for that input.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
-Required mutual exclusive subnode-properties:
- - gpios: OF device-tree gpio specification.
- - interrupts: the interrupt line for that input
+Note that either "interrupts" or "gpios" properties can be omitted, but not
+both at the same time. Specifying both properties is allowed.
Optional subnode-properties:
- linux,input-type: Specify event type this button/key generates.
@@ -23,6 +24,9 @@ Optional subnode-properties:
- debounce-interval: Debouncing interval time in milliseconds.
If not specified defaults to 5.
- gpio-key,wakeup: Boolean, button can wake-up the system.
+ - linux,can-disable: Boolean, indicates that button is connected
+ to dedicated (not shared) interrupt which can be disabled to
+ suppress events from the button.
Example nodes:
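
For readers more used to board files than bindings: the same knobs exist in the driver's C platform data (include/linux/gpio_keys.h). The sketch below is an editor's illustration only, not part of this patch; the GPIO and IRQ numbers are invented. It mirrors the rule stated above — one button described by a GPIO, one described only by a dedicated interrupt with linux,can-disable.

```c
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/kernel.h>

static struct gpio_keys_button demo_buttons[] = {
	{
		/* GPIO-backed key: "gpios" in the binding */
		.desc			= "power",
		.code			= KEY_POWER,
		.gpio			= 42,		/* assumed GPIO number */
		.active_low		= 1,
		.debounce_interval	= 5,		/* ms, the documented default */
		.wakeup			= 1,		/* gpio-key,wakeup */
	},
	{
		/* Interrupt-only key: "interrupts" without "gpios" */
		.desc			= "volume-up",
		.code			= KEY_VOLUMEUP,
		.gpio			= -1,		/* no GPIO attached */
		.irq			= 77,		/* assumed dedicated IRQ */
		.can_disable		= true,		/* linux,can-disable */
	},
};

static struct gpio_keys_platform_data demo_pdata = {
	.buttons	= demo_buttons,
	.nbuttons	= ARRAY_SIZE(demo_buttons),
};
```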
diff --git a/Documentation/devicetree/bindings/input/stmpe-keypad.txt b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
index 1b97222e8a0b..12bb771d66d4 100644
--- a/Documentation/devicetree/bindings/input/stmpe-keypad.txt
+++ b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
@@ -8,6 +8,8 @@ Optional properties:
- debounce-interval : Debouncing interval time in milliseconds
- st,scan-count : Scanning cycles elapsed before key data is updated
- st,no-autorepeat : If specified device will not autorepeat
+ - keypad,num-rows : See ./matrix-keymap.txt
+ - keypad,num-columns : See ./matrix-keymap.txt
Example:
diff --git a/MAINTAINERS b/MAINTAINERS
index ddb9ac8d32b3..3589d67437f8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h
F: drivers/char/apm-emulation.c
APPLE BCM5974 MULTITOUCH DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
-S: Maintained
+S: Odd fixes
F: drivers/input/mouse/bcm5974.c
APPLE SMC DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: lm-sensors@lm-sensors.org
-S: Maintained
+S: Odd fixes
F: drivers/hwmon/applesmc.c
APPLETALK NETWORK LAYER
@@ -2259,6 +2259,7 @@ F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <clm@fb.com>
M: Josef Bacik <jbacik@fb.com>
+M: David Sterba <dsterba@suse.cz>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -4940,10 +4941,10 @@ F: include/uapi/linux/input.h
F: include/linux/input/
INPUT MULTITOUCH (MT) PROTOCOL
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S: Maintained
+S: Odd fixes
F: Documentation/input/multi-touch-protocol.txt
F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_
diff --git a/Makefile b/Makefile
index b1c3254441f3..e41a3356abee 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
NAME = Diseased Newt
# *DOCUMENTATION*
@@ -391,6 +391,7 @@ USERINCLUDE := \
# Needed to be compatible with the O= option
LINUXINCLUDE := \
-I$(srctree)/arch/$(hdr-arch)/include \
+ -Iarch/$(hdr-arch)/include/generated/uapi \
-Iarch/$(hdr-arch)/include/generated \
$(if $(KBUILD_SRC), -I$(srctree)/include) \
-Iinclude \
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 1466580be295..70b1943a86b1 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -203,27 +203,3 @@
compatible = "linux,spdif-dir";
};
};
-
-&pinctrl {
- /*
- * These pins might be muxed as I2S by
- * the bootloader, but it conflicts
- * with the real I2S pins that are
- * muxed using i2s_pins. We must mux
- * those pins to a function other than
- * I2S.
- */
- pinctrl-0 = <&hog_pins1 &hog_pins2>;
- pinctrl-names = "default";
-
- hog_pins1: hog-pins1 {
- marvell,pins = "mpp6", "mpp8", "mpp10",
- "mpp12", "mpp13";
- marvell,function = "gpio";
- };
-
- hog_pins2: hog-pins2 {
- marvell,pins = "mpp5", "mpp7", "mpp9";
- marvell,function = "gpo";
- };
-};
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2328fe752e9c..bc393b7e5ece 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -338,6 +338,7 @@ CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 705bb7620673..0c3f5a0dafd3 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -413,6 +413,7 @@
#define __NR_getrandom (__NR_SYSCALL_BASE+384)
#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
#define __NR_bpf (__NR_SYSCALL_BASE+386)
+#define __NR_execveat (__NR_SYSCALL_BASE+387)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index e51833f8cc38..05745eb838c5 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -396,6 +396,7 @@
CALL(sys_getrandom)
/* 385 */ CALL(sys_memfd_create)
CALL(sys_bpf)
+ CALL(sys_execveat)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
index 6e4379c67cbc..592dda3f21ff 100644
--- a/arch/arm/kernel/perf_regs.c
+++ b/arch/arm/kernel/perf_regs.c
@@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f9c863911038..715ae19bc7c8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "model name\t: %s rev %d (%s)\n",
cpu_name, cpuid & 15, elf_platform);
+#if defined(CONFIG_SMP)
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+ (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+#else
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000/HZ),
+ (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
/* dump out the processor features */
seq_puts(m, "Features\t: ");
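
The hunk above puts BogoMIPS back into /proc/cpuinfo using the per-CPU loops_per_jiffy. As a quick check of the scaling it prints, here is a small stand-alone C sketch (editor's illustration; the HZ and loops_per_jiffy values are assumed, not taken from the patch):

```c
#include <stdio.h>

#define HZ 100UL			/* assumed tick rate */

int main(void)
{
	unsigned long loops_per_jiffy = 4980736;	/* assumed calibration result */

	/* BogoMIPS = loops_per_jiffy * HZ / 500000, printed as "whole.fraction" */
	printf("BogoMIPS\t: %lu.%02lu\n",
	       loops_per_jiffy / (500000UL / HZ),
	       (loops_per_jiffy / (5000UL / HZ)) % 100);
	return 0;
}
```

With these assumed values it prints "BogoMIPS : 996.14", i.e. loops_per_jiffy * HZ / 500000 with two fractional digits.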
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5e6052e18850..86ef244c5a24 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
+ int cpu;
+ unsigned long bogosum = 0;
+
+ for_each_online_cpu(cpu)
+ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+ printk(KERN_INFO "SMP: Total of %d processors activated "
+ "(%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(),
+ bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+
hyp_mode_check();
}
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 3585cb394e9b..caa21e9b8cd9 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -246,9 +246,14 @@ static int coherency_type(void)
return type;
}
+/*
+ * As a precaution, we currently completely disable hardware I/O
+ * coherency, until enough testing is done with automatic I/O
+ * synchronization barriers to validate that it is a proper solution.
+ */
int coherency_available(void)
{
- return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+ return false;
}
int __init coherency_init(void)
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 59424937e52b..9fe8e241335c 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
- if (addr < USER_PGTABLES_CEILING)
- return;
-
if (!st->level) {
st->level = level;
st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
pgd_t *pgd = swapper_pg_dir;
struct pg_state st;
unsigned long addr;
- unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+ unsigned i;
memset(&st, 0, sizeof(st));
st.seq = m;
st.marker = address_markers;
- pgd += pgdoff;
-
- for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 98ad9c79ea0e..2495c8cb47ba 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
- .mask = ~PMD_SECT_RDONLY,
- .prot = PMD_SECT_RDONLY,
+ .mask = ~L_PMD_SECT_RDONLY,
+ .prot = L_PMD_SECT_RDONLY,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cda7c40999b6..4e6ef896c619 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
struct memblock_region *reg;
- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dd301be89ecc..5376d908eabe 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -13,14 +14,12 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_CGROUP_HUGETLB=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
@@ -92,7 +91,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-# CONFIG_HMC_DRV is not set
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_PL061=y
@@ -133,6 +131,8 @@ CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_VFAT_FS=y
@@ -152,14 +152,15 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
+CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_GHASH_ARM64_CE=y
-CONFIG_CRYPTO_AES_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b1fa4e614718..fbe0ca31a99c 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -21,6 +21,7 @@
#include <asm/barrier.h>
+#include <linux/bug.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index ace70682499b..8e797b2fcc01 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
+ u32 reg_id_dfr0;
u32 reg_id_isar0;
u32 reg_id_isar1;
u32 reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
u32 reg_id_mmfr3;
u32 reg_id_pfr0;
u32 reg_id_pfr1;
+
+ u32 reg_mvfr0;
+ u32 reg_mvfr1;
+ u32 reg_mvfr2;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index d34189bceff7..9ce3e680ae1c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,13 +52,14 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
-static inline int set_arch_dma_coherent_ops(struct device *dev)
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent)
{
- dev->archdata.dma_coherent = true;
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
- return 0;
+ dev->archdata.dma_coherent = coherent;
+ if (coherent)
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
-#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
+#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index df22314f57cf..210d632aa5ad 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -298,7 +298,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
#define pud_write(pud) pte_write(pud_pte(pud))
#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
@@ -401,7 +400,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
-#define pud_page(pud) pmd_page(pud_pmd(pud))
+#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
@@ -437,6 +436,8 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}
+#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 286b1bec547c..f9be30ea1cbd 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -31,6 +31,7 @@
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -123,9 +124,6 @@ struct task_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 49c9aefd24a5..b780c6c76eec 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 386
+#define __NR_compat_syscalls 387
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 57b641747534..07d435cf2eea 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
* If we have AArch32, we care about 32-bit features for compat. These
* registers should be RES0 otherwise.
*/
+ diff |= CHECK(id_dfr0, boot, cur, cpu);
diff |= CHECK(id_isar0, boot, cur, cpu);
diff |= CHECK(id_isar1, boot, cur, cpu);
diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(id_pfr0, boot, cur, cpu);
diff |= CHECK(id_pfr1, boot, cur, cpu);
+ diff |= CHECK(mvfr0, boot, cur, cpu);
+ diff |= CHECK(mvfr1, boot, cur, cpu);
+ diff |= CHECK(mvfr2, boot, cur, cpu);
+
/*
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 6fac253bc783..2bb4347d0edf 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
efi_setup_idmap();
+ early_memunmap(memmap.map, memmap.map_end - memmap.map);
}
static int __init remap_region(efi_memory_desc_t *md, void **new)
@@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
}
mapsize = memmap.map_end - memmap.map;
- early_memunmap(memmap.map, mapsize);
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fd027b101de5..9b6f71db2709 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,7 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 6762ad705587..3f62b35fb6f1 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b80991166754..20fe2932ad0c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
request_standard_resources();
efi_idmap_init();
+ early_ioremap_reset();
unflatten_device_tree();
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 4f93c67e63de..14944e5b28da 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/cputype.h>
+#include <asm/io.h>
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3771b72b6569..2d6b6065fe7f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -5,6 +5,7 @@
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
+#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
@@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
*/
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
- cpu_switch_mm(mm->pgd, mm);
+ /*
+ * We are resuming from reset with TTBR0_EL1 set to the
+ * idmap to enable the MMU; restore the active_mm mappings in
+ * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+ * the thread entered __cpu_suspend with TTBR0_EL1 set to
+ * reserved TTBR0 page tables and should be restored as such.
+ */
+ if (mm == &init_mm)
+ cpu_set_reserved_ttbr0();
+ else
+ cpu_switch_mm(mm->pgd, mm);
+
flush_tlb_all();
/*
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 6f4bac969bf7..23eada79439c 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index f3b51b57740a..95c39b95e97e 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
-#define NR_syscalls 318 /* length of syscall table */
+#define NR_syscalls 319 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 4c2240c1b0cb..461079560c78 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -331,5 +331,6 @@
#define __NR_getrandom 1339
#define __NR_memfd_create 1340
#define __NR_bpf 1341
+#define __NR_execveat 1342
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 615ef81def49..e795cb848154 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
{
ia64_cpu_to_sapicid[cpu] = -1;
set_cpu_present(cpu, false);
@@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index f5e96dffc63c..fcf8b8cbca0b 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1779,6 +1779,7 @@ sys_call_table:
data8 sys_getrandom
data8 sys_memfd_create // 1340
data8 sys_bpf
+ data8 sys_execveat
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 51d5bb90d3e5..a223691dff4f 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
+ cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
err_cpu("DIV");
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 83bca17d1008..0bdfd13ff98b 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
GET_THREAD_INFO r1
ldw r4, TI_PREEMPT_COUNT(r1)
bne r4, r0, restore_all
-
-need_resched:
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
andi r10, r4, ESTATUS_EPIE
beq r10, r0, restore_all
- movia r4, PREEMPT_ACTIVE
- stw r4, TI_PREEMPT_COUNT(r1)
- rdctl r10, status /* enable intrs again */
- ori r10, r10 ,STATUS_PIE
- wrctl status, r10
- PUSH r1
- call schedule
- POP r1
- mov r4, r0
- stw r4, TI_PREEMPT_COUNT(r1)
- rdctl r10, status /* disable intrs */
- andi r10, r10, %lo(~STATUS_PIE)
- wrctl status, r10
- br need_resched
-#else
- br restore_all
+ call preempt_schedule_irq
#endif
+ br restore_all
/***********************************************************************
* A few syscall wrappers
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index d2d11b7055ba..8121aa6db2ff 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -33,11 +33,18 @@
#endif /*!CONFIG_PA20*/
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+ We don't explicitly expose that "*a" may be written as reload
+ fails to find a register in class R1_REGS when "a" needs to be
+ reloaded when generating 64-bit PIC code. Instead, we clobber
+ memory to indicate to the compiler that the assembly code reads
+ or writes to items other than those listed in the input and output
+ operands. This may pessimize the code somewhat but __ldcw is
+ usually used within code blocks surrounded by memory barriers. */
#define __ldcw(a) ({ \
unsigned __ret; \
- __asm__ __volatile__(__LDCW " 0(%2),%0" \
- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
+ __asm__ __volatile__(__LDCW " 0(%1),%0" \
+ : "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})
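
The new comment is the whole story: once the "+m" output operand is dropped, the compiler has to be told some other way that memory may change, hence the "memory" clobber. Below is a hedged user-space stand-in (editor's sketch, x86 xchg instead of PA-RISC ldcw, names invented) with the same constraint/clobber shape:

```c
#include <stdio.h>

/* Illustration only: xchg plays the role of ldcw (atomically load the old
 * word and store a new one).  As in the patch, *a is not listed as an asm
 * operand; the "memory" clobber is what stops the compiler from reusing a
 * stale cached value of *a across the asm.
 */
static inline unsigned int fake_ldcw(volatile unsigned int *a)
{
	unsigned int ret = 0;

	__asm__ __volatile__("xchgl %0, (%1)"
			     : "+r" (ret)
			     : "r" (a)
			     : "memory");
	return ret;
}

int main(void)
{
	volatile unsigned int lock = 1;
	unsigned int old = fake_ldcw(&lock);

	printf("old value %u, word is now %u\n", old, lock);
	return 0;
}
```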
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 19c36cba37c4..a46f5f45570c 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
+static inline bool kdump_in_progress(void)
+{
+ return crashing_cpu >= 0;
+}
+
#else /* !CONFIG_KEXEC */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
return 0;
}
+static inline bool kdump_in_progress(void)
+{
+ return false;
+}
+
#endif /* CONFIG_KEXEC */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
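
Later hunks in this merge (machine_kexec_64.c and pseries/lpar.c) switch their open-coded `crashing_cpu == -1` tests to this helper. A user-space sketch of the same pattern (editor's illustration; the variable and messages are stand-ins for the kernel state):

```c
#include <stdbool.h>
#include <stdio.h>

static int crashing_cpu = -1;		/* -1: no crash/kdump underway */

static bool kdump_in_progress(void)
{
	return crashing_cpu >= 0;
}

int main(void)
{
	if (!kdump_in_progress())
		printf("normal kexec: bring the other CPUs to a known state first\n");

	crashing_cpu = 2;		/* pretend CPU 2 entered the crash path */
	if (kdump_in_progress())
		printf("kdump: skip steps that would fail with CPUs offline\n");
	return 0;
}
```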
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ce9577d693be..91062eef582f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index e0da021caa00..36b79c31eedd 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 362
+#define __NR_syscalls 363
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index f55351f2e66e..ef5b5b1f3123 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
#define __NR_getrandom 359
#define __NR_memfd_create 360
#define __NR_bpf 361
+#define __NR_execveat 362
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 879b3aacac32..f96d1ec24189 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
* using debugger IPI.
*/
- if (crashing_cpu == -1)
+ if (!kdump_in_progress())
kexec_prepare_cpus();
pr_debug("kexec: Starting switchover sequence.\n");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ec017cb4446..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,6 +700,7 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
preempt_disable();
+ cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
- /*
- * CPU must be marked active and online before we signal back to the
- * master, because the scheduler needs to see the cpu_online and
- * cpu_active bits set.
- */
- smp_wmb();
- cpu_callin_map[cpu] = 1;
-
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 469751d92004..b5682fd6c984 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
#include <asm/fadump.h>
#include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
* out to the user, but at least this will stop us from
* continuing on further and creating an even more
* difficult to debug situation.
+ *
+ * There is a known problem when kdump'ing, if cpus are offline
+ * the above call will fail. Rather than panicking again, keep
+ * going and hope the kdump kernel is also little endian, which
+ * it usually is.
*/
- if (rc)
+ if (rc && !kdump_in_progress())
panic("Could not enable big endian exceptions");
}
#endif
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 87bc86821bc9..d195a87ca542 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,6 +3,7 @@ config UML
default y
select HAVE_ARCH_AUDITSYSCALL
select HAVE_UID16
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5b016e2498f3..3db07f30636f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,6 +51,7 @@ targets += cpustr.h
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
$(call if_changed,cpustr)
endif
+clean-files += cpustr.h
# ---------------------------------------------------------------------------
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index fd0f848938cc..5a4a089e8b1f 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@@ -46,6 +45,7 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+ obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
endif
aes-i586-y := aes-i586-asm_32.o aes_glue.o
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 2df2a0298f5a..a916c4a61165 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -208,7 +208,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 3*16(p_keys), xkeyA
+ vmovdqa 3*16(p_keys), xkey4
.endif
.else
vmovdqa 3*16(p_keys), xkeyA
@@ -224,7 +224,7 @@ ddq_add_8:
add $(16*by), p_in
.if (klen == KEY_128)
- vmovdqa 4*16(p_keys), xkey4
+ vmovdqa 4*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 4*16(p_keys), xkey4
@@ -234,7 +234,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyA, var_xdata, var_xdata /* key 3 */
+ /* key 3 */
+ .if (klen == KEY_128)
+ vaesenc xkey4, var_xdata, var_xdata
+ .else
+ vaesenc xkeyA, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -243,13 +248,18 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkey4, var_xdata, var_xdata /* key 4 */
+ /* key 4 */
+ .if (klen == KEY_128)
+ vaesenc xkeyB, var_xdata, var_xdata
+ .else
+ vaesenc xkey4, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 6*16(p_keys), xkeyB
+ vmovdqa 6*16(p_keys), xkey8
.endif
.else
vmovdqa 6*16(p_keys), xkeyB
@@ -267,12 +277,17 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyB, var_xdata, var_xdata /* key 6 */
+ /* key 6 */
+ .if (klen == KEY_128)
+ vaesenc xkey8, var_xdata, var_xdata
+ .else
+ vaesenc xkeyB, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
- vmovdqa 8*16(p_keys), xkey8
+ vmovdqa 8*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 8*16(p_keys), xkey8
@@ -288,7 +303,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 9*16(p_keys), xkeyA
+ vmovdqa 9*16(p_keys), xkey12
.endif
.else
vmovdqa 9*16(p_keys), xkeyA
@@ -297,7 +312,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkey8, var_xdata, var_xdata /* key 8 */
+ /* key 8 */
+ .if (klen == KEY_128)
+ vaesenc xkeyB, var_xdata, var_xdata
+ .else
+ vaesenc xkey8, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -306,7 +326,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyA, var_xdata, var_xdata /* key 9 */
+ /* key 9 */
+ .if (klen == KEY_128)
+ vaesenc xkey12, var_xdata, var_xdata
+ .else
+ vaesenc xkeyA, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -412,7 +437,6 @@ ddq_add_8:
/* main body of aes ctr load */
.macro do_aes_ctrmain key_len
-
cmp $16, num_bytes
jb .Ldo_return2\key_len
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e7e9682a33e9..f556c4843aa1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
/*
* Load per CPU data from GDT. LSL is faster than RDTSCP and
- * works on all CPUs.
+ * works on all CPUs. This is volatile so that it orders
+ * correctly wrt barrier() and to keep gcc from cleverly
+ * hoisting it out of the calling function.
*/
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
return p;
}
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4433a4be8171..d1626364a28a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index e27b49d7c922..80091ae54c2b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,3 +66,4 @@ targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
$(call if_changed,mkcapflags)
endif
+clean-files += capflags.c
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index e2b22df964cd..36d99a337b49 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -28,7 +28,7 @@ function dump_array()
# If the /* comment */ starts with a quote string, grab that.
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
[ -z "$VALUE" ] && VALUE="\"$NAME\""
- [ "$VALUE" == '""' ] && continue
+ [ "$VALUE" = '""' ] && continue
# Name is uppercase, VALUE is all lowercase
VALUE="$(echo "$VALUE" | tr A-Z a-z)"
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 18eb78bbdd10..863d9b02563e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -17,7 +17,7 @@
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX 2
+#define UNCORE_EXTRA_PCI_DEV_MAX 3
/* support up to 8 sockets */
#define UNCORE_SOCKET_MAX 8
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 745b158e9a65..21af6149edf2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
+ HSWEP_PCI_PCU_3,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
{
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+ /* Detect 6-8 core systems with only two SBOXes */
+ if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+ u32 capid4;
+
+ pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+ 0x94, &capid4);
+ if (((capid4 >> 6) & 0x3) == 0)
+ hswep_uncore_sbox.num_boxes = 2;
+ }
+
uncore_msr_uncores = hswep_msr_uncores;
}
@@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
+ { /* PCU.3 (for Capability registers) */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ HSWEP_PCI_PCU_3),
+ },
{ /* end: all zeroes */ }
};
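
A user-space sketch of the CAPID4 field test that the hunk above uses to detect 6-8 core parts with only two SBOXes; the default count of four boxes is an assumption made for illustration, not something this diff states.

#include <stdio.h>
#include <stdint.h>

/* Extract the 2-bit field at bit 6 of CAPID4, as hswep_uncore_cpu_init()
 * does above; a zero field means only two SBOXes are present.  The
 * default of four boxes is an assumption made for this sketch. */
static unsigned int hswep_sbox_count(uint32_t capid4)
{
	return (((capid4 >> 6) & 0x3) == 0) ? 2 : 4;
}

int main(void)
{
	printf("capid4=0x00000000 -> %u SBOXes\n", hswep_sbox_count(0x00000000));
	printf("capid4=0x00000040 -> %u SBOXes\n", hswep_sbox_count(0x00000040));
	return 0;
}
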
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index e309cc5c276e..781861cc5ee8 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
#else /* CONFIG_X86_64 */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
(1ULL << PERF_REG_X86_ES) | \
@@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ struct pt_regs *user_regs = task_pt_regs(current);
+
+ /*
+ * If we're in an NMI that interrupted task_pt_regs setup, then
+ * we can't sample user regs at all. This check isn't really
+ * sufficient, though, as we could be in an NMI inside an interrupt
+ * that happened during task_pt_regs setup.
+ */
+ if (regs->sp > (unsigned long)&user_regs->r11 &&
+ regs->sp <= (unsigned long)(user_regs + 1)) {
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+ regs_user->regs = NULL;
+ return;
+ }
+
+ /*
+ * RIP, flags, and the argument registers are usually saved.
+ * orig_ax is probably okay, too.
+ */
+ regs_user_copy->ip = user_regs->ip;
+ regs_user_copy->cx = user_regs->cx;
+ regs_user_copy->dx = user_regs->dx;
+ regs_user_copy->si = user_regs->si;
+ regs_user_copy->di = user_regs->di;
+ regs_user_copy->r8 = user_regs->r8;
+ regs_user_copy->r9 = user_regs->r9;
+ regs_user_copy->r10 = user_regs->r10;
+ regs_user_copy->r11 = user_regs->r11;
+ regs_user_copy->orig_ax = user_regs->orig_ax;
+ regs_user_copy->flags = user_regs->flags;
+
+ /*
+ * Don't even try to report the "rest" regs.
+ */
+ regs_user_copy->bx = -1;
+ regs_user_copy->bp = -1;
+ regs_user_copy->r12 = -1;
+ regs_user_copy->r13 = -1;
+ regs_user_copy->r14 = -1;
+ regs_user_copy->r15 = -1;
+
+ /*
+ * For this to be at all useful, we need a reasonable guess for
+	 * sp and the ABI.  We're in NMI context and we treat current as
+	 * the current task, so be careful not to look at any other percpu
+	 * variables that might change during context switches.
+ */
+ if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+ task_thread_info(current)->status & TS_COMPAT) {
+ /* Easy case: we're in a compat syscall. */
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
+ regs_user_copy->sp = user_regs->sp;
+ regs_user_copy->cs = user_regs->cs;
+ regs_user_copy->ss = user_regs->ss;
+ } else if (user_regs->orig_ax != -1) {
+ /*
+ * We're probably in a 64-bit syscall.
+ * Warning: this code is severely racy. At least it's better
+ * than just blindly copying user_regs.
+ */
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
+ regs_user_copy->sp = this_cpu_read(old_rsp);
+ regs_user_copy->cs = __USER_CS;
+ regs_user_copy->ss = __USER_DS;
+ regs_user_copy->cx = -1; /* usually contains garbage */
+ } else {
+ /* We're probably in an interrupt or exception. */
+ regs_user->abi = user_64bit_mode(user_regs) ?
+ PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
+ regs_user_copy->sp = user_regs->sp;
+ regs_user_copy->cs = user_regs->cs;
+ regs_user_copy->ss = user_regs->ss;
+ }
+
+ regs_user->regs = regs_user_copy;
+}
#endif /* CONFIG_X86_32 */
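
A stand-alone sketch of the stack-pointer containment test used by the new perf_get_regs_user() above; struct fake_pt_regs is an invented stand-in that only preserves the member ordering the check relies on.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the x86-64 pt_regs layout; only the relative
 * position of r11 inside the frame matters for this check. */
struct fake_pt_regs {
	uint64_t r15, r14, r13, r12, bp, bx;
	uint64_t r11, r10, r9, r8, ax, cx, dx, si, di;
	uint64_t orig_ax, ip, cs, flags, sp, ss;
};

/* Mirrors the guard in perf_get_regs_user() above: if the interrupted
 * stack pointer lies inside the tail of the frame being written, the
 * frame may be only partially populated, so refuse to sample it. */
static int frame_may_be_partial(uintptr_t sp, const struct fake_pt_regs *frame)
{
	return sp > (uintptr_t)&frame->r11 &&
	       sp <= (uintptr_t)(frame + 1);
}

int main(void)
{
	struct fake_pt_regs frame;

	printf("sp inside the frame tail: %d\n",
	       frame_may_be_partial((uintptr_t)&frame.ip, &frame));
	printf("sp outside the frame:     %d\n",
	       frame_may_be_partial((uintptr_t)&frame - 64, &frame));
	return 0;
}
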
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 10fbed126b11..f83fc6c5e0ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4448,7 +4448,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
* zap all shadow pages.
*/
if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
- printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+ printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index feb852b04598..d4c58d884838 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5840,53 +5840,10 @@ static __init int hardware_setup(void)
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
- if (enable_ept) {
- kvm_mmu_set_mask_ptes(0ull,
- (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
- (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK);
- ept_set_mmio_spte_mask();
- kvm_enable_tdp();
- } else
- kvm_disable_tdp();
-
- update_ple_window_actual_max();
-
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
goto out7;
- }
+ }
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -5945,6 +5902,49 @@ static __init int hardware_setup(void)
if (nested)
nested_vmx_setup_ctls_msrs();
+ vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
+ vmx_msr_bitmap_legacy, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+ if (enable_apicv) {
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
+
+ /* According SDM, in x2apic mode, the whole id reg is used.
+ * But in KVM, it only use the highest eight bits. Need to
+ * intercept it */
+ vmx_enable_intercept_msr_read_x2apic(0x802);
+ /* TMCCT */
+ vmx_enable_intercept_msr_read_x2apic(0x839);
+ /* TPR */
+ vmx_disable_intercept_msr_write_x2apic(0x808);
+ /* EOI */
+ vmx_disable_intercept_msr_write_x2apic(0x80b);
+ /* SELF-IPI */
+ vmx_disable_intercept_msr_write_x2apic(0x83f);
+ }
+
+ if (enable_ept) {
+ kvm_mmu_set_mask_ptes(0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+ 0ull, VMX_EPT_EXECUTABLE_MASK);
+ ept_set_mmio_spte_mask();
+ kvm_enable_tdp();
+ } else
+ kvm_disable_tdp();
+
+ update_ple_window_actual_max();
+
return alloc_kvm_area();
out7:
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 2480978b31cc..1313ae6b478b 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -28,7 +28,7 @@
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
- ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
+ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a97ee0801475..08a7d313538a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
static unsigned long __init get_new_step_size(unsigned long step_size)
{
/*
- * Explain why we shift by 5 and why we don't have to worry about
- * 'step_size << 5' overflowing:
- *
- * initial mapped size is PMD_SIZE (2M).
+ * Initial mapped size is PMD_SIZE (2M).
* We can not set step_size to be PUD_SIZE (1G) yet.
* In worse case, when we cross the 1G boundary, and
* PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
- * to map 1G range with PTE. Use 5 as shift for now.
+ * to map 1G range with PTE. Hence we use one less than the
+ * difference of page table level shifts.
*
- * Don't need to worry about overflow, on 32bit, when step_size
- * is 0, round_down() returns 0 for start, and that turns it
- * into 0x100000000ULL.
+	 * We don't need to worry about overflow in the top-down case: on
+	 * 32bit, when step_size is 0, round_down() returns 0 for start, and
+	 * that turns it into 0x100000000ULL.
+	 * In the bottom-up case, however, round_up(x, 0) also returns 0,
+	 * which the code below needs to take into account.
*/
- return step_size << 5;
+ return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}
/**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
unsigned long step_size;
unsigned long addr;
unsigned long mapped_ram_size = 0;
- unsigned long new_mapped_ram_size;
/* xen has big range in reserved near end of ram, skip it at first.*/
addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
start = map_start;
} else
start = map_start;
- new_mapped_ram_size = init_range_memory_mapping(start,
+ mapped_ram_size += init_range_memory_mapping(start,
last_start);
last_start = start;
min_pfn_mapped = last_start >> PAGE_SHIFT;
- /* only increase step_size after big range get mapped */
- if (new_mapped_ram_size > mapped_ram_size)
+ if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
- mapped_ram_size += new_mapped_ram_size;
}
if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
static void __init memory_map_bottom_up(unsigned long map_start,
unsigned long map_end)
{
- unsigned long next, new_mapped_ram_size, start;
+ unsigned long next, start;
unsigned long mapped_ram_size = 0;
/* step_size need to be small so pgt_buf from BRK could cover it */
unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
* for page table.
*/
while (start < map_end) {
- if (map_end - start > step_size) {
+ if (step_size && map_end - start > step_size) {
next = round_up(start + 1, step_size);
if (next > map_end)
next = map_end;
- } else
+ } else {
next = map_end;
+ }
- new_mapped_ram_size = init_range_memory_mapping(start, next);
+ mapped_ram_size += init_range_memory_mapping(start, next);
start = next;
- if (new_mapped_ram_size > mapped_ram_size)
+ if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
- mapped_ram_size += new_mapped_ram_size;
}
}
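
A user-space sketch of the new step-size arithmetic above, assuming the usual x86-64 values PAGE_SHIFT = 12 and PMD_SHIFT = 21 (they are not taken from any particular build).

#include <stdio.h>

/* Assumed values for x86-64 with 4K pages and 2M PMD mappings. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)

/* Mirrors the new get_new_step_size(): shift by one less than the
 * difference of the page-table level shifts (here 21 - 12 - 1 = 8). */
static unsigned long get_new_step_size(unsigned long step_size)
{
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

int main(void)
{
	unsigned long step = PMD_SIZE;	/* initial mapped size: 2 MiB */

	printf("initial step:     %4lu MiB\n", step >> 20);
	step = get_new_step_size(step);
	printf("after one growth: %4lu MiB\n", step >> 20);	/* 512 MiB */
	return 0;
}
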
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 531d4269e2e3..bd16d6c370ec 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 20c3649d0691..5cdfa9db2217 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 009495b9ab4b..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
struct linux_binprm;
-/* Put the vdso above the (randomized) stack with another randomized offset.
- This way there is no hole in the middle of address space.
- To save memory make sure it is still in the same PTE as the stack top.
- This doesn't give that many random bits.
-
- Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset. This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top. This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
#else
unsigned long addr, end;
unsigned offset;
- end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+ /*
+ * Round up the start address. It can start out unaligned as a result
+ * of stack start randomization.
+ */
+ start = PAGE_ALIGN(start);
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;
- /* This loses some more bits than a modulo, but is cheaper */
- offset = get_random_int() & (PTRS_PER_PTE - 1);
- addr = start + (offset << PAGE_SHIFT);
- if (addr >= end)
- addr = end;
+
+ if (end > start) {
+ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ addr = start + (offset << PAGE_SHIFT);
+ } else {
+ addr = start;
+ }
/*
- * page-align it here so that get_unmapped_area doesn't
- * align it wrongfully again to the next page. addr can come in 4K
- * unaligned here as a result of stack start randomization.
+ * Forcibly align the final address in case we have a hardware
+ * issue that requires alignment for performance reasons.
*/
- addr = PAGE_ALIGN(addr);
addr = align_vdso_addr(addr);
return addr;
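
A user-space sketch of the uniform page-offset selection the rewritten vdso_addr() performs above; the address range in main() is invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Mirrors the new offset selection: pick one of the page-aligned
 * addresses in [start, end] with equal probability, instead of masking
 * the random value and clamping out-of-range picks to 'end'. */
static unsigned long pick_vdso_addr(unsigned long start, unsigned long end)
{
	unsigned long offset;

	if (end <= start)
		return start;

	offset = (unsigned long)rand() % (((end - start) >> PAGE_SHIFT) + 1);
	return start + (offset << PAGE_SHIFT);
}

int main(void)
{
	unsigned long start = 0x7f0000000000UL;	/* made-up, page aligned */
	unsigned long end = start + 16 * PAGE_SIZE;
	int i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 4; i++)
		printf("candidate vdso address: 0x%lx\n",
		       pick_vdso_addr(start, end));
	return 0;
}
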
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 1fa7bc31be63..4665b79c729a 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -455,6 +455,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
{
struct af_alg_completion *completion = req->data;
+ if (err == -EINPROGRESS)
+ return;
+
completion->err = err;
complete(&completion->completion);
}
diff --git a/drivers/Makefile b/drivers/Makefile
index 67d2334dc41e..527a6da8d539 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/
obj-y += tty/
obj-y += char/
-# gpu/ comes after char for AGP vs DRM startup
+# iommu/ comes before gpu as gpu are using iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
obj-y += gpu/
obj-$(CONFIG_CONNECTOR) += connector/
@@ -141,7 +144,6 @@ obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
-obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1fdf5e07a1c7..1020b1b53a17 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
- if (pr->apic_id == -1)
+ if (pr->phys_id == -1)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
+ ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
if (ret)
goto out;
ret = arch_register_cpu(pr->id);
if (ret) {
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
goto out;
}
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int apic_id, cpu_index, device_declaration = 0;
+ int phys_id, cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
- apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
- if (apic_id < 0)
- acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
- pr->apic_id = apic_id;
+ phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
+ if (phys_id < 0)
+ acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
+ pr->phys_id = phys_id;
- cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+ cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
cpu0_initialized = 1;
- /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+ /*
+ * Handle UP system running SMP kernel, with no CPU
+ * entry in MADT
+ */
if ((cpu_index == -1) && (num_online_cpus() == 1))
cpu_index = 0;
}
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Remove the CPU. */
arch_unregister_cpu(pr->id);
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
cpu_hotplug_done();
cpu_maps_update_done();
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c2daa85fc9f7..c0d44d394ca3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device))
- return 0;
+ return -ENXIO;
result = acpi_device_get_power(device, &state);
if (result)
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 342942f90a10..02e48394276c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
unsigned long madt_end, entry;
static struct acpi_table_madt *madt;
static int read_madt;
- int apic_id = -1;
+ int phys_id = -1; /* CPU hardware ID */
if (!read_madt) {
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
}
if (!madt)
- return apic_id;
+ return phys_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (!map_lapic_id(header, acpi_id, &apic_id))
+ if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (!map_x2apic_id(header, type, acpi_id, &apic_id))
+ if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (!map_lsapic_id(header, type, acpi_id, &apic_id))
+ if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
}
- return apic_id;
+ return phys_id;
}
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
- int apic_id = -1;
+ int phys_id = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
- map_lapic_id(header, acpi_id, &apic_id);
+ map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
- map_lsapic_id(header, type, acpi_id, &apic_id);
+ map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
- map_x2apic_id(header, type, acpi_id, &apic_id);
+ map_x2apic_id(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
- return apic_id;
+ return phys_id;
}
-int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = map_mat_entry(handle, type, acpi_id);
- if (apic_id == -1)
- apic_id = map_madt_entry(type, acpi_id);
+ phys_id = map_mat_entry(handle, type, acpi_id);
+ if (phys_id == -1)
+ phys_id = map_madt_entry(type, acpi_id);
- return apic_id;
+ return phys_id;
}
-int acpi_map_cpuid(int apic_id, u32 acpi_id)
+int acpi_map_cpuid(int phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
- if (apic_id == -1) {
+ if (phys_id == -1) {
/*
* On UP processor, there is no _MAT or MADT table.
- * So above apic_id is always set to -1.
+ * So above phys_id is always set to -1.
*
* BIOS may define multiple CPU handles even for UP processor.
* For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always returns 0 for the processor
+ * Ignores phys_id and always returns 0 for the processor
* handle with acpi id 0 if nr_cpu_ids is 1.
* This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
- return apic_id;
+ return phys_id;
}
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
- if (cpu_physical_id(i) == apic_id)
+ if (cpu_physical_id(i) == phys_id)
return i;
}
#else
/* In UP kernel, only processor 0 is valid */
- if (apic_id == 0)
- return apic_id;
+ if (phys_id == 0)
+ return phys_id;
#endif
return -1;
}
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = acpi_get_apicid(handle, type, acpi_id);
+ phys_id = acpi_get_phys_id(handle, type, acpi_id);
- return acpi_map_cpuid(apic_id, acpi_id);
+ return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 499536504698..87b704e41877 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
- if (cx->entry_method != ACPI_CSTATE_FFH)
- state->flags |= CPUIDLE_FLAG_TIME_INVALID;
state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 16914cc30882..dc4d8960684a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
if (device->wakeup.flags.valid)
acpi_power_resources_list_free(&device->wakeup.resources);
- if (!device->flags.power_manageable)
+ if (!device->power.flags.power_resources)
return;
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
- if (acpi_bus_init_power(device)) {
- acpi_free_power_resources_lists(device);
+ if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
- }
}
static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
device->flags.visited = false;
+ device->flags.power_manageable = 0;
return;
}
if (device->handler)
goto ok;
if (!device->flags.initialized) {
- acpi_bus_update_power(device, NULL);
+ device->flags.power_manageable =
+ device->power.states[ACPI_STATE_D0].flags.valid;
+ if (acpi_bus_init_power(device))
+ device->flags.power_manageable = 0;
+
device->flags.initialized = true;
}
device->flags.visited = false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1eaadff2e198..032db459370f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -505,6 +505,33 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
},
},
+
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
+ },
+ },
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
+ },
+ },
+
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
+ .callback = video_disable_native_backlight,
+ .ident = "Dell XPS15 L521X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
+ },
+ },
{}
};
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6a103a35ea9b..0d8780c04a5e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
* on failure.
*/
-static struct generic_pm_domain *of_genpd_get_from_provider(
+struct generic_pm_domain *of_genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
return genpd;
}
+EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d24dd614a0bd..106c69359306 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
+#define opp_rcu_lockdep_assert() \
+do { \
+ rcu_lockdep_assert(rcu_read_lock_held() || \
+ lockdep_is_held(&dev_opp_list_lock), \
+ "Missing rcu_read_lock() or " \
+ "dev_opp_list_lock protection"); \
+} while (0)
+
/**
* find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
* This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
*
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
struct dev_pm_opp *temp_opp;
int count = 0;
+ rcu_read_lock();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
- return r;
+ count = PTR_ERR(dev_opp);
+ dev_err(dev, "%s: device OPP not found (%d)\n",
+ __func__, count);
+ goto out_unlock;
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
count++;
}
+out_unlock:
+ rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
- if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
- PTR_ERR(dev_opp)))
+ if (IS_ERR(dev_opp)) {
+ int error = PTR_ERR(dev_opp);
+ if (error != -ENODEV)
+ WARN(1, "%s: dev_opp: %d\n",
+ IS_ERR_OR_NULL(dev) ?
+ "Invalid device" : dev_name(dev),
+ error);
return;
+ }
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index eb7682dc123b..81bf297f1034 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
}
/* Checks whether the given window number is available */
+
+/* On Armada XP, 375 and 38x the MBus window 13 has the remap
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
 * doesn't currently take this special case into account, which means
 * that when window 13 is actually used, the remap registers are left
 * at 0, making the device behind this MBus window unavailable. The
+ * quick fix for stable is to not use window 13. A follow up patch
+ * will correctly handle this window.
+*/
static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
const int win)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+ if (win == 13)
+ return false;
+
return !(ctrl & WIN_CTRL_ENABLE);
}
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 19db03667650..dcbbb4ea3cc1 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 3b47ed0310e1..0ef350010766 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 18a7a6baa304..75a9786a77e6 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 317c28ce8328..38ffb281df97 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
__setup("agp=", agp_setup);
#endif
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index f9b9ca5d31b7..0a21daed5b62 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index f3334829e55a..92aa43fa8d70 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
}
EXPORT_SYMBOL(intel_gmch_remove);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index a1861b75eb31..6c8d39cb566e 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
/*
* Nvidia AGPGART routines.
* Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@redhat.com>
+ * to work in 2.5 by Dave Jones.
*/
#include <linux/module.h>
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 228f20cddc05..a4961d35e940 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@ module_init(agp_via_init);
module_exit(agp_via_cleanup);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5fa83f751378..6b65fa4e0c55 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -199,18 +199,6 @@ struct bmc_device {
int guid_set;
char name[16];
struct kref usecount;
-
- /* bmc device attributes */
- struct device_attribute device_id_attr;
- struct device_attribute provides_dev_sdrs_attr;
- struct device_attribute revision_attr;
- struct device_attribute firmware_rev_attr;
- struct device_attribute version_attr;
- struct device_attribute add_dev_support_attr;
- struct device_attribute manufacturer_id_attr;
- struct device_attribute product_id_attr;
- struct device_attribute guid_attr;
- struct device_attribute aux_firmware_rev_attr;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
@@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
-DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
+static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
@@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
-DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
+static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
+ NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
-DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
+static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
@@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
-DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
+static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
@@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
-DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
+static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
@@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
-DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+ NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
-DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
+static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
-DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
+static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
@@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
-DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
-DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (bmc->guid_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
platform_device_unregister(&bmc->pdev);
}
@@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
int err;
if (bmc->id.aux_firmware_revision_set) {
- bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
err = device_create_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (err)
goto out;
}
if (bmc->guid_set) {
- bmc->guid_attr.attr.name = "guid";
err = device_create_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
if (err)
goto out_aux_firm;
}
@@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
out:
return err;
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index e178ac27e73c..982b96323f82 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -52,6 +52,7 @@
#include <linux/dmi.h>
#include <linux/kthread.h>
#include <linux/acpi.h>
+#include <linux/ctype.h>
#define PFX "ipmi_ssif: "
#define DEVICE_NAME "ipmi_ssif"
@@ -968,7 +969,8 @@ static void sender(void *send_info,
do_gettimeofday(&t);
pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
- msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+ msg->data[0], msg->data[1],
+ (long) t.tv_sec, (long) t.tv_usec);
}
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 6a79fc4f900c..095c1774592c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f56147a1daed..fde97d6e31d6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
+ /*
+	 * But we need the OPP table to function, so if it is not there,
+	 * let's give the platform code a chance to provide it for us.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ pr_debug("OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a09a29c312a9..46bed4f81cde 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;
+ /*
+	 * The governor might not have been initialized yet if an ACPI
+	 * _PPC change notification happened, so check for it.
+ */
+ if (!policy->governor)
+ return -EINVAL;
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 37263d9a1051..401c0106ed34 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
- if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
- last_residency = cpuidle_get_last_residency(dev) - \
- drv->states[last_idx].exit_latency;
- }
- else
- last_residency = last_state->threshold.promotion_time + 1;
+ last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 659d7b0c9ebf..40580794e23d 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* power state and occurrence of the wakeup event.
*
* If the entered idle state didn't support residency measurements,
- * we are basically lost in the dark how much time passed.
- * As a compromise, assume we slept for the whole expected time.
+	 * we still use the measured value if it is short, and truncate
+	 * it to the whole expected time if it is long.
*
* Any measured amount of time will include the exit latency.
* Since we are interested in when the wakeup begun, not when it
@@ -405,22 +405,17 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* the measured amount of time is less than the exit latency,
* assume the state was never reached and the exit latency is 0.
*/
- if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
- /* Use timer value as is */
- measured_us = data->next_timer_us;
- } else {
- /* Use measured value */
- measured_us = cpuidle_get_last_residency(dev);
+ /* measured value */
+ measured_us = cpuidle_get_last_residency(dev);
- /* Deduct exit latency */
- if (measured_us > target->exit_latency)
- measured_us -= target->exit_latency;
+ /* Deduct exit latency */
+ if (measured_us > target->exit_latency)
+ measured_us -= target->exit_latency;
- /* Make sure our coefficients do not exceed unity */
- if (measured_us > data->next_timer_us)
- measured_us = data->next_timer_us;
- }
+ /* Make sure our coefficients do not exceed unity */
+ if (measured_us > data->next_timer_us)
+ measured_us = data->next_timer_us;
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
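
A small sketch of the measured-time computation that remains in menu_update() once the CPUIDLE_FLAG_TIME_INVALID branch is gone; the numbers in main() are invented for illustration.

#include <stdio.h>

/* Mirrors the new logic: take the measured residency, deduct the exit
 * latency when the state was actually reached, then clamp to the
 * predicted timer distance so correction factors never exceed unity. */
static unsigned int compute_measured_us(unsigned int last_residency_us,
					unsigned int exit_latency_us,
					unsigned int next_timer_us)
{
	unsigned int measured_us = last_residency_us;

	if (measured_us > exit_latency_us)
		measured_us -= exit_latency_us;

	if (measured_us > next_timer_us)
		measured_us = next_timer_us;

	return measured_us;
}

int main(void)
{
	/* measured 1200us, 50us exit latency, timer expected in 1000us */
	printf("measured_us = %u\n", compute_measured_us(1200, 50, 1000));
	/* measured 30us, shorter than the 50us exit latency */
	printf("measured_us = %u\n", compute_measured_us(30, 50, 1000));
	return 0;
}
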
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 66e40398b3d3..e620807418ea 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 4f7b275f2f7b..fcfdf23e1913 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,6 @@
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
-#include <linux/uaccess.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
@@ -121,27 +120,20 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
- process->is_32bit_user_mode = is_32bit_user_mode;
-
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
- kfd_init_apertures(process);
-
return 0;
}
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_get_version_args args;
+ struct kfd_ioctl_get_version_args *args = data;
int err = 0;
- args.major_version = KFD_IOCTL_MAJOR_VERSION;
- args.minor_version = KFD_IOCTL_MINOR_VERSION;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- err = -EFAULT;
+ args->major_version = KFD_IOCTL_MAJOR_VERSION;
+ args->minor_version = KFD_IOCTL_MINOR_VERSION;
return err;
}
@@ -225,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return 0;
}
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_create_queue_args args;
+ struct kfd_ioctl_create_queue_args *args = data;
struct kfd_dev *dev;
int err = 0;
unsigned int queue_id;
@@ -237,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
pr_debug("kfd: creating queue ioctl\n");
- err = set_queue_properties_from_user(&q_properties, &args);
+ err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -254,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto err_bind_process;
}
@@ -267,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
if (err != 0)
goto err_create_queue;
- args.queue_id = queue_id;
+ args->queue_id = queue_id;
/* Return gpu_id as doorbell offset for mmap usage */
- args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
- if (copy_to_user(arg, &args, sizeof(args))) {
- err = -EFAULT;
- goto err_copy_args_out;
- }
+ args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
mutex_unlock(&p->mutex);
- pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+ pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
pr_debug("ring buffer address == 0x%016llX\n",
- args.ring_base_address);
+ args->ring_base_address);
pr_debug("read ptr address == 0x%016llX\n",
- args.read_pointer_address);
+ args->read_pointer_address);
pr_debug("write ptr address == 0x%016llX\n",
- args.write_pointer_address);
+ args->write_pointer_address);
return 0;
-err_copy_args_out:
- pqm_destroy_queue(&p->pqm, queue_id);
err_create_queue:
err_bind_process:
mutex_unlock(&p->mutex);
@@ -301,99 +283,90 @@ err_bind_process:
}
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_destroy_queue_args args;
-
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
+ struct kfd_ioctl_destroy_queue_args *args = data;
pr_debug("kfd: destroying queue id %d for PASID %d\n",
- args.queue_id,
+ args->queue_id,
p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+ retval = pqm_destroy_queue(&p->pqm, args->queue_id);
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_update_queue_args args;
+ struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
- if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
- if ((args.ring_base_address) &&
+ if ((args->ring_base_address) &&
(!access_ok(VERIFY_WRITE,
- (const void __user *) args.ring_base_address,
+ (const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("kfd: can't access ring base address\n");
return -EFAULT;
}
- if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+ if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
pr_err("kfd: ring size must be a power of 2 or 0\n");
return -EINVAL;
}
- properties.queue_address = args.ring_base_address;
- properties.queue_size = args.ring_size;
- properties.queue_percent = args.queue_percentage;
- properties.priority = args.queue_priority;
+ properties.queue_address = args->ring_base_address;
+ properties.queue_size = args->ring_size;
+ properties.queue_percent = args->queue_percentage;
+ properties.priority = args->queue_priority;
pr_debug("kfd: updating queue id %d for PASID %d\n",
- args.queue_id, p->pasid);
+ args->queue_id, p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+ retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
return retval;
}
-static long kfd_ioctl_set_memory_policy(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_set_memory_policy_args args;
+ struct kfd_ioctl_set_memory_policy_args *args = data;
struct kfd_dev *dev;
int err = 0;
struct kfd_process_device *pdd;
enum cache_policy default_policy, alternate_policy;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -401,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto out;
}
- default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
alternate_policy =
- (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
if (!dev->dqm->set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
- (void __user *)args.alternate_aperture_base,
- args.alternate_aperture_size))
+ (void __user *)args->alternate_aperture_base,
+ args->alternate_aperture_size))
err = -EINVAL;
out:
@@ -426,53 +399,44 @@ out:
return err;
}
-static long kfd_ioctl_get_clock_counters(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_clock_counters_args args;
+ struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
struct timespec time;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
/* Reading GPU clock counter from KGD */
- args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
/* No access to rdtsc. Using raw monotonic time */
getrawmonotonic(&time);
- args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
get_monotonic_boottime(&time);
- args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
/* Since the counter is in nano-seconds we use 1GHz frequency */
- args.system_clock_freq = 1000000000;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
+ args->system_clock_freq = 1000000000;
return 0;
}
static int kfd_ioctl_get_process_apertures(struct file *filp,
- struct kfd_process *p, void __user *arg)
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_process_apertures_args args;
+ struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- args.num_of_nodes = 0;
+ args->num_of_nodes = 0;
mutex_lock(&p->mutex);
@@ -481,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
/* Run over all pdd of the process */
pdd = kfd_get_first_process_device_data(p);
do {
- pAperture = &args.process_apertures[args.num_of_nodes];
+ pAperture =
+ &args->process_apertures[args->num_of_nodes];
pAperture->gpu_id = pdd->dev->id;
pAperture->lds_base = pdd->lds_base;
pAperture->lds_limit = pdd->lds_limit;
@@ -491,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
pAperture->scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
- "node id %u\n", args.num_of_nodes);
+ "node id %u\n", args->num_of_nodes);
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
@@ -507,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
- args.num_of_nodes++;
+ args->num_of_nodes++;
} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
- (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+ (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
-
return 0;
}
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+ kfd_ioctl_get_version, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
+ kfd_ioctl_create_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
+ kfd_ioctl_destroy_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
+ kfd_ioctl_set_memory_policy, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
+ kfd_ioctl_get_clock_counters, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
+ kfd_ioctl_get_process_apertures, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
+ kfd_ioctl_update_queue, 0),
+};
+
+#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kfd_process *process;
- long err = -EINVAL;
+ amdkfd_ioctl_t *func;
+ const struct amdkfd_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+ int retcode = -EINVAL;
+
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ goto err_i1;
+
+ if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
+ u32 amdkfd_size;
+
+ ioctl = &amdkfd_ioctls[nr];
- dev_dbg(kfd_device,
- "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
- cmd, _IOC_NR(cmd), arg);
+ amdkfd_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (amdkfd_size > asize)
+ asize = amdkfd_size;
+
+ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
- if (IS_ERR(process))
- return PTR_ERR(process);
+ if (IS_ERR(process)) {
+ dev_dbg(kfd_device, "no process\n");
+ goto err_i1;
+ }
- switch (cmd) {
- case KFD_IOC_GET_VERSION:
- err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
- break;
- case KFD_IOC_CREATE_QUEUE:
- err = kfd_ioctl_create_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_DESTROY_QUEUE:
- err = kfd_ioctl_destroy_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_SET_MEMORY_POLICY:
- err = kfd_ioctl_set_memory_policy(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_CLOCK_COUNTERS:
- err = kfd_ioctl_get_clock_counters(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_PROCESS_APERTURES:
- err = kfd_ioctl_get_process_apertures(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_UPDATE_QUEUE:
- err = kfd_ioctl_update_queue(filep, process,
- (void __user *)arg);
- break;
-
- default:
- dev_err(kfd_device,
- "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
- cmd, arg);
- err = -EINVAL;
- break;
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ dev_dbg(kfd_device, "no function\n");
+ retcode = -EINVAL;
+ goto err_i1;
}
- if (err < 0)
- dev_err(kfd_device,
- "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
- err, cmd, _IOC_NR(cmd));
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
- return err;
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ retcode = func(filep, process, kdata);
+
+ if (cmd & IOC_OUT)
+ if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+ retcode = -EFAULT;
+
+err_i1:
+ if (!ioctl)
+ dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current), cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+
+ if (retcode)
+ dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+ return retcode;
}
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
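The refactored kfd_ioctl() above replaces the per-command switch with a descriptor table indexed by the ioctl number plus one generic copy-in/copy-out path. The following is a minimal userspace sketch of that table-driven pattern, not the kernel API: the descriptor struct, command numbers and plain memcpy stand in for amdkfd_ioctl_desc, _IOC_NR and copy_from_user/copy_to_user.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical argument block; in the kernel these come from uapi headers. */
struct get_version_args { unsigned major, minor; };

typedef int (*ioctl_fn)(void *data);

struct ioctl_desc {
	const char *name;
	size_t size;		/* size of the argument block the handler expects */
	ioctl_fn func;
};

static int get_version(void *data)
{
	struct get_version_args *args = data;

	args->major = 1;
	args->minor = 0;
	return 0;
}

static const struct ioctl_desc ioctls[] = {
	[0] = { "GET_VERSION", sizeof(struct get_version_args), get_version },
};

/* One generic entry point: bounds-check nr, stage the arg block in a buffer
 * sized to the larger of caller/handler expectations, zero the tail, call
 * the handler, then copy the result back to the caller's buffer. */
static int dispatch(unsigned nr, void *user_buf, size_t user_size)
{
	char stack_kdata[128];
	char *kdata = stack_kdata;
	size_t asize;
	int ret;

	if (nr >= sizeof(ioctls) / sizeof(ioctls[0]) || !ioctls[nr].func)
		return -1;

	asize = ioctls[nr].size > user_size ? ioctls[nr].size : user_size;
	if (asize > sizeof(stack_kdata)) {
		kdata = malloc(asize);
		if (!kdata)
			return -1;
	}

	memcpy(kdata, user_buf, user_size);		/* copy_from_user stand-in */
	memset(kdata + user_size, 0, asize - user_size);

	ret = ioctls[nr].func(kdata);

	memcpy(user_buf, kdata, user_size);		/* copy_to_user stand-in */
	if (kdata != stack_kdata)
		free(kdata);
	return ret;
}

int main(void)
{
	struct get_version_args args = { 0, 0 };

	if (dispatch(0, &args, sizeof(args)) == 0)
		printf("version %u.%u\n", args.major, args.minor);
	return 0;
}

The appeal of the table is that every handler shares one bounds check and one copy path, so adding a command is a one-line table entry rather than another switch arm with its own copy_from_user/copy_to_user.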
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 924e90c072e5..9c8961d22360 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
{
int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+ /* Release the vmid mapping */
+ set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+
set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
qpd->vmid = 0;
q->properties.vmid = 0;
@@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
+ pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
+ q->pipe,
+ q->queue);
+
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ q->queue, q->properties.write_ptr);
+ if (retval != 0) {
+ deallocate_hqd(dqm, q);
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ return retval;
+ }
+
return 0;
}
@@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
struct mqd_manager *mqd;
+ bool prev_active = false;
BUG_ON(!dqm || !q || !q->mqd);
@@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
return -ENOMEM;
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if (q->properties.is_active == true)
+ prev_active = true;
+
+ /*
+ * check active state vs. the previous state
+ * and modify counter accordingly
+ */
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ if ((q->properties.is_active == true) && (prev_active == false))
dqm->queue_count++;
- else
+ else if ((q->properties.is_active == false) && (prev_active == true))
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
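The update_queue() hunk above records whether the queue was active before update_mqd() runs and only adjusts queue_count on a real inactive/active transition. A small sketch of that bookkeeping, with a made-up queue struct standing in for struct queue:

#include <stdbool.h>
#include <stdio.h>

struct queue { bool is_active; };

static int queue_count;

/* Adjust the global counter only when the active state really flips,
 * so repeated updates of an already-active queue do not inflate it. */
static void update_queue(struct queue *q, bool now_active)
{
	bool prev_active = q->is_active;

	q->is_active = now_active;	/* stands in for mqd->update_mqd() */

	if (now_active && !prev_active)
		queue_count++;
	else if (!now_active && prev_active)
		queue_count--;
}

int main(void)
{
	struct queue q = { false };

	update_queue(&q, true);
	update_queue(&q, true);		/* no double count */
	update_queue(&q, false);
	printf("active queues: %d\n", queue_count);	/* prints 0 */
	return 0;
}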
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 66df4da01c29..e64aa99e5e41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
struct kfd_dev *dev;
struct kfd_process_device *pdd;
- mutex_lock(&process->mutex);
-
/*Iterating over all devices*/
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_get_process_device_data(dev, process, 1);
+ if (!pdd)
+ return -1;
/*
* For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
id++;
}
- mutex_unlock(&process->mutex);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index adc31474e786..4c3828cf45bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+ return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 71699ad97d74..4c25ef504f79 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,7 +32,7 @@ int kfd_pasid_init(void)
{
pasid_limit = max_num_of_processes;
- pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
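The kfd_pasid.c hunk above fixes an undersized allocation: kzalloc(BITS_TO_LONGS(n), ...) asks for n/BITS_PER_LONG bytes, not that many longs. A userspace sketch of the corrected sizing, with BITS_TO_LONGS spelled out since this is not kernel code:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t nbits = 512;		/* e.g. max_num_of_processes */
	unsigned long *bitmap;

	/* Wrong: allocating BITS_TO_LONGS(nbits) *bytes* gives only 8 bytes
	 * for 512 bits on a 64-bit machine, so setting higher bits would
	 * write past the allocation. */

	/* Right: one long per BITS_PER_LONG bits, zero-initialized. */
	bitmap = calloc(BITS_TO_LONGS(nbits), sizeof(long));
	if (!bitmap)
		return 1;

	printf("%zu bits -> %zu longs (%zu bytes)\n",
	       nbits, BITS_TO_LONGS(nbits), BITS_TO_LONGS(nbits) * sizeof(long));
	free(bitmap);
	return 0;
}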
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f9fb81e3bb09..a5edb29507e3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -463,6 +463,24 @@ struct kfd_process {
bool is_32bit_user_mode;
};
+/**
+ * Ioctl function type.
+ *
+ * \param filep pointer to file structure.
+ * \param p amdkfd process pointer.
+ * \param data pointer to arg that was copied from user.
+ */
+typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
+ void *data);
+
+struct amdkfd_ioctl_desc {
+ unsigned int cmd;
+ int flags;
+ amdkfd_ioctl_t *func;
+ unsigned int cmd_drv;
+ const char *name;
+};
+
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b85eb0b830b4..3c76ef05cbcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
+#include <linux/compat.h>
+
struct mm_struct;
#include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_process_pqm_init;
+ /* init process apertures */
+ process->is_32bit_user_mode = is_compat_task();
+ if (kfd_init_apertures(process) != 0)
+ goto err_init_apertures;
+
return process;
+err_init_apertures:
+ pqm_uninit(&process->pqm);
err_process_pqm_init:
hash_del_rcu(&process->kfd_processes);
synchronize_rcu();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5733e2859e8a..cca1708fd811 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
- sysfs_show_32bit_prop(buffer, "engine_id",
- dev->node_props.engine_id);
sysfs_show_32bit_prop(buffer, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+ sysfs_show_32bit_prop(buffer, "fw_version",
+ kfd2kgd->get_fw_version(
+ dev->gpu->kgd,
+ KGD_ENGINE_MEC1));
+
}
ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
@@ -917,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
uint32_t i = 0;
list_for_each_entry(dev, &topology_device_list, list) {
- ret = kfd_build_sysfs_node_entry(dev, 0);
+ ret = kfd_build_sysfs_node_entry(dev, i);
if (ret < 0)
return ret;
i++;
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9c729dd8dd50..96a512208fad 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -45,6 +45,17 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA,
+ KGD_ENGINE_MAX
+};
+
struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
+ * @get_fw_version: Returns FW versions from the header
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -170,12 +183,14 @@ struct kfd2kgd_calls {
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
- bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+ bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
+ uint16_t (*get_fw_version)(struct kgd_dev *kgd,
+ enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a78a773151c..bbdbe4721573 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
- crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+ crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
if (WARN_ON(!crtc_state))
return;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f5a5f18efa5b..4d79dad9d44f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
+ * This is the legacy version of drm_crtc_vblank_count().
+ *
* Returns:
* The software vblank counter.
*/
@@ -844,6 +846,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * This is the native KMS version of drm_vblank_count().
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+ return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);
+
+/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value.
*
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
+ *
+ * This is the legacy version of drm_crtc_send_vblank_event().
*/
void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e)
@@ -923,6 +946,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
EXPORT_SYMBOL(drm_send_vblank_event);
/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ *
+ * This is the native KMS version of drm_send_vblank_event().
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);
+
+/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+ return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
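The drm_irq.c additions above are thin wrappers that take a drm_crtc pointer and forward to the older (dev, index)-based functions. A trivial sketch of that wrapper shape with stand-in types (the real drm_crtc/drm_device definitions live in the DRM headers):

#include <stdio.h>

/* Minimal stand-ins for struct drm_device / struct drm_crtc. */
struct device { unsigned vblank_count[4]; };
struct crtc { struct device *dev; int index; };

/* Legacy interface: callers pass the device and a crtc index. */
static unsigned vblank_count(struct device *dev, int index)
{
	return dev->vblank_count[index];
}

/* Object-based wrapper: derive (dev, index) from the crtc itself, so a
 * caller that already holds a crtc pointer cannot pass a mismatched pair. */
static unsigned crtc_vblank_count(struct crtc *c)
{
	return vblank_count(c->dev, c->index);
}

int main(void)
{
	struct device dev = { { 0, 42, 0, 0 } };
	struct crtc c = { &dev, 1 };

	printf("%u\n", crtc_vblank_count(&c));
	return 0;
}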
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f990ab4c3efb..574057cd1d09 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -811,6 +811,8 @@ int i915_reset(struct drm_device *dev)
if (!i915.reset)
return 0;
+ intel_reset_gt_powersave(dev);
+
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
@@ -880,7 +882,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
- intel_reset_gt_powersave(dev);
+ intel_enable_gt_powersave(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1584,7 +1586,7 @@ static struct drm_driver driver = {
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_dumb_map_offset,
+ .dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 63bcda5541ec..e9f891c432f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
*/
struct workqueue_struct *dp_wq;
- uint32_t bios_vgacntr;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -2501,9 +2499,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a9faea626db..c11603b4cf1d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -401,7 +401,6 @@ static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
- bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
if (obj == NULL)
return -ENOMEM;
- obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, true, &args->handle);
+ args->size, &args->handle);
}
/**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, false, &args->handle);
+ args->size, &args->handle);
}
static inline int
@@ -1050,6 +1048,7 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1069,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -EFAULT;
}
+ intel_runtime_pm_get(dev_priv);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- return ret;
+ goto put_rpm;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
@@ -1123,6 +1124,9 @@ out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
+put_rpm:
+ intel_runtime_pm_put(dev_priv);
+
return ret;
}
@@ -1840,10 +1844,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
drm_gem_free_mmap_offset(&obj->base);
}
-static int
+int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle, bool dumb,
+ uint32_t handle,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1864,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1891,15 +1888,6 @@ unlock:
return ret;
}
-int
-i915_gem_dumb_map_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- return i915_gem_mmap_gtt(file, dev, handle, true, offset);
-}
-
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1921,7 +1909,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
static inline int
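The i915_gem_pwrite_ioctl() hunk above takes a runtime-PM reference before the locking path and releases it through a new put_rpm label on every exit. A generic sketch of that acquire-then-goto-unwind shape, using hypothetical acquire/release helpers rather than the real intel_runtime_pm_* calls:

#include <stdio.h>

static int pm_get(void)			{ puts("pm_get");  return 0; }
static void pm_put(void)		{ puts("pm_put"); }
static int lock_interruptible(void)	{ puts("lock");    return 0; /* or an error */ }
static void unlock(void)		{ puts("unlock"); }

/* Resources are released in reverse acquisition order; every failure after
 * pm_get() must reach put_rpm instead of returning directly. */
static int do_write(void)
{
	int ret;

	ret = pm_get();
	if (ret)
		return ret;

	ret = lock_interruptible();
	if (ret)
		goto put_rpm;

	puts("write");			/* the actual work */

	unlock();
put_rpm:
	pm_put();
	return ret;
}

int main(void)
{
	return do_write();
}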
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d17ff435f276..d011ec82ef1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
u32 hw_flags)
{
u32 flags = hw_flags | MI_MM_SPACE_GTT;
- int ret;
+ const int num_rings =
+ /* Use an extended w/a on ivb+ if signalling from other rings */
+ i915_semaphore_is_enabled(ring->dev) ?
+ hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ 0;
+ int len, i, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring,
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
- ret = intel_ring_begin(ring, 6);
+
+ len = 4;
+ if (INTEL_INFO(ring->dev)->gen >= 7)
+ len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+ ret = intel_ring_begin(ring, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
+ }
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring,
*/
intel_ring_emit(ring, MI_NOOP);
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ }
intel_ring_advance(ring);
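mi_set_context() above now computes the number of dwords it will emit before calling intel_ring_begin(), deriving the length from the same gen/num_rings conditions that later drive the emits. The sketch below shows that general shape with a simple array standing in for the ring buffer; the constants are made up.

#include <stdio.h>
#include <stdlib.h>

struct ring {
	unsigned *buf;
	unsigned len;		/* dwords reserved */
	unsigned pos;		/* dwords emitted */
};

static int ring_begin(struct ring *r, unsigned len)
{
	r->buf = malloc(len * sizeof(*r->buf));
	if (!r->buf)
		return -1;
	r->len = len;
	r->pos = 0;
	return 0;
}

static void ring_emit(struct ring *r, unsigned dword)
{
	if (r->pos >= r->len) {		/* reservation too small: a bug */
		fprintf(stderr, "overflow at dword %u\n", r->pos);
		return;
	}
	r->buf[r->pos++] = dword;
}

int main(void)
{
	struct ring r;
	int extra = 3;			/* e.g. other rings to poke */
	unsigned len, i;

	/* Derive the reservation from the same condition used when emitting,
	 * so the two cannot drift apart. */
	len = 2;			/* fixed part */
	if (extra)
		len += 1 + 2 * extra;	/* header + two dwords per ring */

	if (ring_begin(&r, len))
		return 1;

	ring_emit(&r, 0x1000);		/* fixed part */
	ring_emit(&r, 0x1001);
	if (extra) {
		ring_emit(&r, 0x2000);	/* header */
		for (i = 0; i < (unsigned)extra; i++) {
			ring_emit(&r, 0x3000 + i);
			ring_emit(&r, 0x4000 + i);
		}
	}

	printf("emitted %u of %u dwords\n", r.pos, r.len);
	free(r.buf);
	return 0;
}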
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f06027ba3ee5..11738316394a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
- WARN_ONCE(obj->base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 981834b0f9b6..d0d3dfbe6d2a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -281,10 +281,14 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
+
WARN_ON(dev_priv->rps.pm_iir);
WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
+ I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+ dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3307,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_INFO(dev)->gen >= 6) {
- pm_irqs |= dev_priv->pm_rps_events;
-
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -3520,7 +3526,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3609,7 +3619,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
vlv_display_irq_reset(dev_priv);
- dev_priv->irq_mask = 0;
+ dev_priv->irq_mask = ~0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3715,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3726,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
@@ -3897,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3908,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index eefdc238f70b..172de3b3433b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -395,6 +395,7 @@
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fb3e3d429191..e2af1383b179 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- /*
- * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
- * from S3 without preserving (some of?) the other bits.
- */
- I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_shared_dpll_init(dev);
- /* save the BIOS value before clobbering it */
- dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1f4b56e273c8..964b28e3c630 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6191,6 +6191,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
valleyview_cleanup_gt_powersave(dev);
}
+static void gen6_suspend_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ /*
+ * TODO: disable RPS interrupts on GEN9+ too once RPS support
+ * is added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_disable_rps_interrupts(dev);
+}
+
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev: drm device
@@ -6206,14 +6220,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_suspend_rps(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -6316,8 +6323,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ gen6_suspend_rps(dev);
dev_priv->rps.enabled = false;
- intel_enable_gt_powersave(dev);
}
static void ibx_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9f445e9a75d1..c7bc93d28d84 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index f5a78d53e297..ac6da7102fbb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
vlv_power_sequencer_reset(dev_priv);
}
-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
-
- check_power_well_state(dev_priv, power_well);
}
power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
-
- check_power_well_state(dev_priv, power_well);
}
mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index aa873048308b..94a5bee69fe7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
drm_gem_object_unreference(gpu->memptrs_bo);
}
- if (gpu->pm4)
- release_firmware(gpu->pm4);
- if (gpu->pfp)
- release_firmware(gpu->pfp);
+ release_firmware(gpu->pm4);
+ release_firmware(gpu->pfp);
msm_gpu_cleanup(&gpu->base);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index fbebb0405d76..b4e70e0e3cfa 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
uint32_t hpd_ctrl;
int i, ret;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
ret = gpio_config(hdmi, true);
if (ret) {
dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
}
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_enable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
hdmi_set_mode(hdmi, false);
phy->funcs->reset(phy);
hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
return ret;
}
-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
hdmi_set_mode(hdmi, false);
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_disable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
for (i = 0; i < config->hpd_clk_cnt; i++)
clk_disable_unprepare(hdmi->hpd_clks[i]);
ret = gpio_config(hdmi, false);
- if (ret) {
- dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
- goto fail;
- }
-
- return 0;
+ if (ret)
+ dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
-fail:
- return ret;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ }
}
static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
- DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
- /* ack the irq: */
+ /* ack & disable (temporarily) HPD events: */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
- hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
/* detect disconnect if we are connected or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
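The hpd_enable()/hdp_disable() changes above bring the regulators up first and take them down last, and the disable path only warns on failure so everything after a failing resource is still released. A sketch of that enable-in-order / tear-down-in-reverse-and-keep-going pattern, with printf standing in for the regulator, clock and GPIO calls:

#include <stdio.h>

#define NRES 3

static int enable_res(int i)
{
	printf("enable %d\n", i);
	return 0;			/* pretend this can fail */
}

static int disable_res(int i)
{
	printf("disable %d\n", i);
	return 0;
}

static int power_on(void)
{
	int i, ret;

	for (i = 0; i < NRES; i++) {
		ret = enable_res(i);
		if (ret)
			goto undo;	/* roll back what was already enabled */
	}
	return 0;
undo:
	while (--i >= 0)
		disable_res(i);
	return ret;
}

/* Teardown never aborts early: a failing resource is only warned about,
 * so everything after it still gets released. */
static void power_off(void)
{
	int i;

	for (i = NRES - 1; i >= 0; i--)
		if (disable_res(i))
			fprintf(stderr, "warning: resource %d did not disable\n", i);
}

int main(void)
{
	if (power_on() == 0)
		power_off();
	return 0;
}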
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index a7672e100d8b..3449213f1e76 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_device *dev = crtc->dev;
-
DBG("%s: check", mdp4_crtc->name);
-
- if (mdp4_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
// TODO anything else to check?
-
return 0;
}
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp4_crtc->name);
+ DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
WARN_ON(mdp4_crtc->event);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 0e9a2e3a82d7..f021f960a8a2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", mdp5_crtc->name);
- if (mdp5_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
/* request a free CTL, if none is already allocated for this CRTC */
if (state->enable && !mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp5_crtc->name);
+ DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
- /* when called from modeset_init(), skip the rest until later: */
- if (!mdp5_kms)
- return;
+ mdp_irq_update(&mdp5_kms->base);
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index a11f1b80c488..9f01a4f21af2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
goto fail;
}
- /* NOTE: the vsync and error irq's are actually associated with
- * the INTF/encoder.. the easiest way to deal with this (ie. what
- * we do now) is assume a fixed relationship between crtc's and
- * encoders. I'm not sure if there is ever a need to more freely
- * assign crtcs to encoders, but if there is then we need to take
- * care of error and vblank irq's that the crtc has registered,
- * and also update user-requested vblank_mask.
- */
- encoder->possible_crtcs = BIT(0);
- mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 03455b64a245..2a731722d840 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
}
-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 99557b5ad4fd..b268ce95d394 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
-
+void mdp_irq_update(struct mdp_kms *mdp_kms);
/*
* pixel format helpers:
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0de412e13dc..191968256c58 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -23,10 +23,41 @@ struct msm_commit {
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
+ uint32_t crtc_mask;
};
static void fence_cb(struct msm_fence_cb *cb);
+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ int ret;
+
+ spin_lock(&priv->pending_crtcs_event.lock);
+ ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+ !(priv->pending_crtcs & crtc_mask));
+ if (ret == 0) {
+ DBG("start: %08x", crtc_mask);
+ priv->pending_crtcs |= crtc_mask;
+ }
+ spin_unlock(&priv->pending_crtcs_event.lock);
+
+ return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ spin_lock(&priv->pending_crtcs_event.lock);
+ DBG("end: %08x", crtc_mask);
+ priv->pending_crtcs &= ~crtc_mask;
+ wake_up_all_locked(&priv->pending_crtcs_event);
+ spin_unlock(&priv->pending_crtcs_event.lock);
+}
+
static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
drm_atomic_helper_commit_post_planes(dev, state);
+ /* NOTE: _wait_for_vblanks() only waits for vblank on
+ * enabled CRTCs. So we end up faulting when disabling
+ * due to (potentially) unref'ing the outgoing fb's
+ * before the vblank when the disable has latched.
+ *
+ * But if it did wait on disabled (or newly disabled)
+ * CRTCs, that would be racy (i.e. we could have missed
+ * the irq). We need some way to poll for pipe shut
+ * down. Or just live with occasionally hitting the
+ * timeout in the CRTC disable path (which really should
+ * not be critical path)
+ */
+
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
+ end_atomic(dev->dev_private, c->crtc_mask);
+
kfree(c);
}
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
- struct msm_commit *c;
int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ struct msm_commit *c;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
return ret;
c = new_commit(state);
+ if (!c)
+ return -ENOMEM;
+
+ /*
+ * Figure out what crtcs we have:
+ */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ if (!crtc)
+ continue;
+ c->crtc_mask |= (1 << drm_crtc_index(crtc));
+ }
/*
* Figure out what fence to wait for:
@@ -122,6 +181,14 @@ int msm_atomic_commit(struct drm_device *dev,
}
/*
+ * Wait for pending updates on any of the same crtc's and then
+ * mark our set of crtc's as busy:
+ */
+ ret = start_atomic(dev->dev_private, c->crtc_mask);
+ if (ret)
+ return ret;
+
+ /*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
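The start_atomic()/end_atomic() helpers above serialize async commits per CRTC: a commit sleeps until none of its CRTCs are marked pending, atomically claims them, and clears the mask and wakes waiters on completion. A userspace sketch of the same idea, with a pthread mutex and condition variable in place of the wait queue:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static uint32_t pending_crtcs;

/* Block until none of the requested CRTCs are busy, then claim them. */
static void start_atomic(uint32_t crtc_mask)
{
	pthread_mutex_lock(&lock);
	while (pending_crtcs & crtc_mask)
		pthread_cond_wait(&cond, &lock);
	pending_crtcs |= crtc_mask;
	pthread_mutex_unlock(&lock);
}

/* Release the CRTCs and wake anyone waiting on them. */
static void end_atomic(uint32_t crtc_mask)
{
	pthread_mutex_lock(&lock);
	pending_crtcs &= ~crtc_mask;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void *commit(void *arg)
{
	uint32_t mask = (uint32_t)(uintptr_t)arg;

	start_atomic(mask);
	printf("committing crtcs %#x\n", mask);
	end_atomic(mask);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, commit, (void *)(uintptr_t)0x1);
	pthread_create(&b, NULL, commit, (void *)(uintptr_t)0x3);	/* overlaps with 0x1 */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Two commits touching disjoint CRTC sets run concurrently; commits whose masks overlap are forced to run one after the other, which is exactly the property the fault described in the comment above needs.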
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c795217e1bfc..9a61546a0b05 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("msm", 0);
init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 136303818436..b69ef2d5a26c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -96,6 +96,10 @@ struct msm_drm_private {
/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;
+ /* crtcs pending async atomic updates: */
+ uint32_t pending_crtcs;
+ wait_queue_head_t pending_crtcs_event;
+
/* registered MMUs: */
unsigned int num_mmus;
struct msm_mmu *mmus[NUM_DOMAINS];
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 94d55e526b4e..1f3af13ccede 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -190,8 +190,7 @@ fail_unlock:
fail:
if (ret) {
- if (fbi)
- framebuffer_release(fbi);
+ framebuffer_release(fbi);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4a6f0e49d5b5..49dea4fb55ac 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_free_large(msm_obj->pages);
} else {
- if (msm_obj->vaddr)
- vunmap(msm_obj->vaddr);
+ vunmap(msm_obj->vaddr);
put_pages(obj);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ff2b434b3db4..760947e380c9 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -26,7 +26,7 @@
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (++event->refs[index * event->types_nr + type] == 1) {
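The nouveau change above replaces BUG_ON(!spin_is_locked(...)), which cannot be relied on where spinlocks compile away (non-SMP, non-debug builds), with assert_spin_locked(). A userspace sketch of the underlying idea, asserting by tracking the owning thread rather than peeking at lock state; the helper names here are invented:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Invented helper: a lock that remembers its owner so holders can assert
 * they really are inside the critical section. */
struct owned_lock {
	pthread_mutex_t mtx;
	pthread_t owner;
	int held;
};

static void ol_lock(struct owned_lock *l)
{
	pthread_mutex_lock(&l->mtx);
	l->owner = pthread_self();
	l->held = 1;
}

static void ol_unlock(struct owned_lock *l)
{
	l->held = 0;
	pthread_mutex_unlock(&l->mtx);
}

static void assert_held(struct owned_lock *l)
{
	/* Unlike "is anybody holding it?", this asks "am *I* holding it?". */
	assert(l->held && pthread_equal(l->owner, pthread_self()));
}

static struct owned_lock refs_lock = { .mtx = PTHREAD_MUTEX_INITIALIZER };
static int refs;

static void event_get(void)
{
	assert_held(&refs_lock);	/* caller must hold the lock */
	refs++;
}

int main(void)
{
	ol_lock(&refs_lock);
	event_get();
	ol_unlock(&refs_lock);
	printf("refs = %d\n", refs);
	return 0;
}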
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
index d1bcde55e9d7..839a32577680 100644
--- a/drivers/gpu/drm/nouveau/core/core/notify.c
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
struct nvkm_event *event = notify->event;
unsigned long flags;
- BUG_ON(!spin_is_locked(&event->list_lock));
+ assert_spin_locked(&event->list_lock);
BUG_ON(size != notify->size);
spin_lock_irqsave(&event->refs_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 674da1f095b2..732922690653 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
break;
+ case 0x106:
+ device->cname = "GK208B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
case 0x108:
device->cname = "GK208";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
index 5e58bba0dd5c..a7a890fad1e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -44,8 +44,10 @@ static void
pramin_fini(void *data)
{
struct priv *priv = data;
- nv_wr32(priv->bios, 0x001700, priv->bar0);
- kfree(priv);
+ if (priv) {
+ nv_wr32(priv->bios, 0x001700, priv->bar0);
+ kfree(priv);
+ }
}
static void *
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
index 00f2ca7e44a5..033a8e999497 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -24,34 +24,71 @@
#include "nv50.h"
+struct nvaa_ram_priv {
+ struct nouveau_ram base;
+ u64 poller_base;
+};
+
static int
nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 datasize,
struct nouveau_object **pobject)
{
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ u32 rsvd_tail = (1024 * 1024); /* vbios etc */
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvaa_ram_priv *priv;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ ret = nouveau_ram_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- ram->size = nv_rd32(pfb, 0x10020c);
- ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+ priv->base.type = NV_MEM_TYPE_STOLEN;
+ priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
- (rsvd_head + rsvd_tail), 1);
+ rsvd_tail += 0x1000;
+ priv->poller_base = priv->base.size - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
+ (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
+ 1);
if (ret)
return ret;
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
- ram->get = nv50_ram_get;
- ram->put = nv50_ram_put;
+ priv->base.get = nv50_ram_get;
+ priv->base.put = nv50_ram_put;
+ return 0;
+}
+
+static int
+nvaa_ram_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvaa_ram_priv *priv = (void *)object;
+ int ret;
+ u64 dniso, hostnb, flush;
+
+ ret = nouveau_ram_init(&priv->base);
+ if (ret)
+ return ret;
+
+ dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
+ hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
+ flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+
+ /* Enable NISO poller for various clients and set their associated
+ * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+ */
+ nv_wr32(pfb, 0x100c18, dniso);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
+ nv_wr32(pfb, 0x100c1c, hostnb);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
+ nv_wr32(pfb, 0x100c24, flush);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+
return 0;
}
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvaa_ram_ctor,
.dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
+ .init = nvaa_ram_init,
.fini = _nouveau_ram_fini,
},
};
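nvaa_ram_ctor() above switches the reserved head/tail bookkeeping from pages to bytes, grows the tail by one extra 4 KiB page for the NISO poller, and converts to pages only when handing the range to the allocator. A small worked example of that arithmetic; the total size here is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size      = 256ULL << 20;	/* total carve-out, e.g. 256 MiB */
	uint64_t rsvd_head = 256  * 1024;	/* vga memory */
	uint64_t rsvd_tail = 1024 * 1024;	/* vbios etc */
	uint64_t poller_base;

	rsvd_tail += 0x1000;			/* one page for the poller */
	poller_base = size - rsvd_tail;

	/* The allocator works in 4 KiB pages, so shift only at the end. */
	printf("usable: offset %llu pages, length %llu pages, poller at %#llx\n",
	       (unsigned long long)(rsvd_head >> 12),
	       (unsigned long long)((size - (rsvd_head + rsvd_tail)) >> 12),
	       (unsigned long long)poller_base);
	return 0;
}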
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
index a75c35ccf25c..165401c4045c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -24,13 +24,6 @@
#include "nv04.h"
-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
- struct nv04_mc_priv *priv = (void *)pmc;
- nv_wr08(priv, 0x088050, 0xff);
-}
-
struct nouveau_oclass *
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.fini = _nouveau_mc_fini,
},
.intr = nv04_mc_intr,
- .msi_rearm = nv4c_mc_msi_rearm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 21ec561edc99..bba2960d3dfb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
* so use the DMA API for them.
*/
if (!nv_device_is_cpu_coherent(device) &&
- ttm->caching_state == tt_uncached)
+ ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
+ return;
+ }
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 5d93902a91ab..f8042433752b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
@@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(!(gem->dumb || gem->import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 28d51a22a4bf..bf0f9e21d714 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -36,7 +36,14 @@ void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
+ struct device *dev = drm->dev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0 && ret != -EACCES))
+ return;
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
/* reset filp so nouveau_bo_del_ttm() can test for it */
gem->filp = NULL;
ttm_bo_unref(&bo);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_vma *vma;
+ struct device *dev = drm->dev->dev;
int ret;
if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0 && ret != -EACCES)
+ goto out;
+
ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
- if (ret) {
+ if (ret)
kfree(vma);
- goto out;
- }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
} else {
vma->refcount++;
}
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret;
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
- if (--vma->refcount == 0)
- nouveau_gem_object_unmap(nvbo, vma);
+ if (--vma->refcount == 0) {
+ ret = pm_runtime_get_sync(dev);
+ if (!WARN_ON(ret < 0 && ret != -EACCES)) {
+ nouveau_gem_object_unmap(nvbo, vma);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+ }
}
ttm_bo_unreserve(&nvbo->bo);
}
@@ -444,9 +469,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- WARN_ONCE(nvbo->gem.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
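[Editorial sketch, not part of the patch] The nouveau_gem.c hunks above wrap GEM object open/close/teardown in a runtime-PM bracket: take a reference with pm_runtime_get_sync(), treat -EACCES (runtime PM disabled) as non-fatal, then release via the autosuspend path. A minimal out-of-tree illustration of that pattern, with my_obj_cleanup()/do_cleanup() as made-up placeholders:

#include <linux/pm_runtime.h>
#include <linux/device.h>

/* Placeholder for whatever teardown actually touches the hardware. */
static void do_cleanup(struct device *dev)
{
}

static void my_obj_cleanup(struct device *dev)
{
	int ret;

	/* Wake the device; -EACCES only means runtime PM is disallowed. */
	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	do_cleanup(dev);

	/* Arm the autosuspend timer rather than suspending immediately. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}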
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 753a6def61e7..3d1cfcb96b6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "drm_legacy.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
@@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ return drm_legacy_mmap(filp, vma);
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d59ec491dbb9..ed644a4f6f57 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return pll;
}
/* otherwise, pick one of the plls */
- if ((rdev->family == CHIP_KAVERI) ||
- (rdev->family == CHIP_KABINI) ||
+ if ((rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
- /* KB/KV/ML has PPLL1 and PPLL2 */
+ /* KB/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
- /* CI has PPLL0, PPLL1, and PPLL2 */
+ /* CI/KV has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
+ (rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_BONAIRE) ||
(rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 11ba9d21b89b..db42a670f995 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
+ if ((mode->clock > 340000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+ return MODE_CLOCK_HIGH;
+
if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ba85986febea..03003f8a6de6 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2156,4 +2156,6 @@
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+#define IH_VMID_0_LUT 0x3D40u
+
#endif
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 2fe8cfc966d9..bafdf92a5732 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9b42001295ba..e3e9c10cfba9 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (radeon_bapm == -1) {
- /* There are stability issues reported on with
- * bapm enabled on an asrock system.
- */
- if (rdev->pdev->subsystem_vendor == 0x1849)
- pi->bapm_enable = false;
- else
+ /* only enable bapm on KB, ML by default */
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
pi->bapm_enable = true;
+ else
+ pi->bapm_enable = false;
} else if (radeon_bapm == 0) {
pi->bapm_enable = false;
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe48f229043e..a46f73737994 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-static int radeon_mode_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, bool dumb,
- uint64_t *offset_p)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
if (gobj == NULL) {
return -ENOENT;
}
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
- "Illegal dumb map of GPU buffer.\n");
-
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
return 0;
}
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- return radeon_mode_mmap(filp, dev, handle, true, offset_p);
-}
-
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
- return radeon_mode_mmap(filp, dev, args->handle, false,
- &args->addr_ptr);
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
- gobj->dumb = true;
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 065d02068ec3..8bf87f1203cc 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -28,6 +28,8 @@
#include "cikd.h"
#include "cik_reg.h"
#include "radeon_kfd.h"
+#include "radeon_ucode.h"
+#include <linux/firmware.h>
#define CIK_PIPE_PER_MEC (4)
@@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
/*
* Register access functions
@@ -69,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -89,14 +92,16 @@ static const struct kfd2kgd_calls kfd2kgd = {
.init_memory = kgd_init_memory,
.init_pipeline = kgd_init_pipeline,
.hqd_load = kgd_hqd_load,
- .hqd_is_occupies = kgd_hqd_is_occupies,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
+ .get_fw_version = get_fw_version
};
static const struct kgd2kfd_calls *kgd2kfd;
bool radeon_kfd_init(void)
{
+#if defined(CONFIG_HSA_AMD_MODULE)
bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
const struct kgd2kfd_calls**);
@@ -113,6 +118,17 @@ bool radeon_kfd_init(void)
}
return true;
+#elif defined(CONFIG_HSA_AMD)
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
}
void radeon_kfd_fini(void)
@@ -374,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
cpu_relax();
write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+ /* Mapping vmid to pasid also for IH block */
+ write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
+ pasid_mapping);
+
return 0;
}
@@ -513,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
uint32_t act;
@@ -552,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
if (timeout == 0) {
pr_err("kfd: cp queue preemption time out (%dms)\n",
temp);
+ release_queue(kgd);
return -ETIME;
}
msleep(20);
@@ -561,3 +582,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
release_queue(kgd);
return 0;
}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct radeon_device *rdev = (struct radeon_device *) kgd;
+ const union radeon_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union radeon_firmware_header *)
+ rdev->mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA:
+ hdr = (const union radeon_firmware_header *)
+ rdev->sdma_fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+	/* Only 12 bits in use */
+ return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d68223eb469..86fc56434b28 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
- WARN_ONCE(bo->gem_base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 535403e0c8a2..15aee723db77 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
u32 format;
u32 *buffer;
const u8 __user *data;
- int size, dwords, tex_width, blit_width, spitch;
+ unsigned int size, dwords, tex_width, blit_width, spitch;
u32 height;
int i;
u32 texpitch, microtile;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3367960286a6..978993fa3a36 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
- unsigned long value;
+ unsigned long value, flags;
bool yuv, planar;
/*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
else
bpp = planar ? 1 : 2;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_window_commit(dc, index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
{
struct tegra_dc *dc = to_tegra_dc(plane->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned long flags;
u32 value;
if (!plane->crtc)
return 0;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
tegra_dc_window_commit(dc, p->index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
struct tegra_bo_tiling tiling;
+ unsigned long value, flags;
unsigned int format, swap;
- unsigned long value;
int err;
err = tegra_fb_get_tiling(fb, &tiling);
if (err < 0)
return err;
+ spin_lock_irqsave(&dc->lock, flags);
+
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
unsigned long flags, base;
struct tegra_bo *bo;
- if (!dc->event)
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (!dc->event) {
+ spin_unlock_irqrestore(&drm->event_lock, flags);
return;
+ }
bo = tegra_fb_get_plane(crtc->primary->fb, 0);
+ spin_lock_irqsave(&dc->lock, flags);
+
/* check if new start address has been latched */
+ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
- spin_lock_irqsave(&drm->event_lock, flags);
- drm_send_vblank_event(drm, dc->pipe, dc->event);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_send_vblank_event(crtc, dc->event);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
- spin_unlock_irqrestore(&drm->event_lock, flags);
}
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
}
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
if (dc->event && dc->event->base.file_priv == file) {
dc->event->base.destroy(&dc->event->base);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
}
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
+ unsigned int pipe = drm_crtc_index(crtc);
struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_device *drm = crtc->dev;
if (dc->event)
return -EBUSY;
if (event) {
- event->pipe = dc->pipe;
+ event->pipe = pipe;
dc->event = event;
- drm_vblank_get(drm, dc->pipe);
+ drm_crtc_vblank_get(crtc);
}
tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
- drm_handle_vblank(dc->base.dev, dc->pipe);
+ drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e549afeece1f..d4f827593dfa 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
+ unsigned int pipe)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->pipe == pipe)
+ if (pipe == drm_crtc_index(crtc))
return crtc;
}
return NULL;
}
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+ if (!crtc)
+ return 0;
+
/* TODO: implement real hardware counter using syncpoints */
- return drm_vblank_count(dev, crtc);
+ return drm_crtc_vblank_count(crtc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086cbeaf..8777b7f75791 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
}
}
-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
+ struct scatterlist *s;
+ struct sg_table *sgt;
+ unsigned int i;
+
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
- bo->num_pages = size >> PAGE_SHIFT;
-
- bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
- if (IS_ERR(bo->sgt)) {
- drm_gem_put_pages(&bo->gem, bo->pages, false, false);
- return PTR_ERR(bo->sgt);
+ bo->num_pages = bo->gem.size >> PAGE_SHIFT;
+
+ sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ if (IS_ERR(sgt))
+ goto put_pages;
+
+ /*
+ * Fake up the SG table so that dma_map_sg() can be used to flush the
+ * pages associated with it. Note that this relies on the fact that
+	 * the DMA API doesn't hook into the IOMMU on Tegra, so mapping is
+ * only cache maintenance.
+ *
+	 * TODO: Replace this with drm_clflush_sg() once it can be implemented
+ * without relying on symbols that are not exported.
+ */
+ for_each_sg(sgt->sgl, s, sgt->nents, i)
+ sg_dma_address(s) = sg_phys(s);
+
+ if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+ sgt = ERR_PTR(-ENOMEM);
+ goto release_sgt;
}
+ bo->sgt = sgt;
+
return 0;
+
+release_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+put_pages:
+ drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+ return PTR_ERR(sgt);
}
-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
- err = tegra_bo_get_pages(drm, bo, size);
+ err = tegra_bo_get_pages(drm, bo);
if (err < 0)
return err;
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
return err;
}
} else {
+ size_t size = bo->gem.size;
+
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
if (IS_ERR(bo))
return bo;
- err = tegra_bo_alloc(drm, bo, size);
+ err = tegra_bo_alloc(drm, bo);
if (err < 0)
goto release;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 230b6f887cd8..dfdc26970022 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -27,7 +27,8 @@ if HID
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
- depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+ depends on HID
+ select POWER_SUPPLY
default n
---help---
This option adds support of reporting battery strength (for HID devices
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c3d0ac1a0988..8b638792cb43 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7460f3402298..9243359c1821 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -526,6 +526,7 @@
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
#define USB_VENDOR_ID_LABTEC 0x1020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e0a0f06ac5ef..9505605b6e22 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{}
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b92bf01a1ae8..158fcf577fae 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
switch (id->product) {
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
ret = kye_tablet_enable(hdev);
if (ret) {
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index c917ab61aafa..5bc6d80d5be7 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
switch (data[0]) {
case REPORT_ID_DJ_SHORT:
+ if (size != DJREPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_dj_event(hdev, report, data, size);
case REPORT_ID_HIDPP_SHORT:
- /* intentional fallthrough */
+ if (size != HIDPP_REPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev,
+ "Short HID++ report of bad size (%d)", size);
+ return false;
+ }
+ return logi_dj_hidpp_event(hdev, report, data, size);
case REPORT_ID_HIDPP_LONG:
+ if (size != HIDPP_REPORT_LONG_LENGTH) {
+ dev_err(&hdev->dev,
+ "Long HID++ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_hidpp_event(hdev, report, data, size);
}
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 2f420c0b6609..a93cefe0e522 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
(report->rap.sub_id == 0x41);
}
+/**
+ * hidpp_prefix_name() prefixes the given name with "Logitech ".
+ */
+static void hidpp_prefix_name(char **name, int name_length)
+{
+#define PREFIX_LENGTH 9 /* "Logitech " */
+
+ int new_length;
+ char *new_name;
+
+ if (name_length > PREFIX_LENGTH &&
+ strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
+		/* The prefix is already in the name */
+ return;
+
+ new_length = PREFIX_LENGTH + name_length;
+ new_name = kzalloc(new_length, GFP_KERNEL);
+ if (!new_name)
+ return;
+
+ snprintf(new_name, new_length, "Logitech %s", *name);
+
+ kfree(*name);
+
+ *name = new_name;
+}
+
/* -------------------------------------------------------------------------- */
/* HIDP++ 1.0 commands */
/* -------------------------------------------------------------------------- */
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
return NULL;
memcpy(name, &response.rap.params[2], len);
+
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, len + 1);
+
return name;
}
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
index += ret;
}
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, __name_length + 1);
+
return name;
}
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
switch (data[0]) {
case 0x02:
+ if (size < 2) {
+ hid_err(hdev, "Received HID report of bad size (%d)",
+ size);
+ return 1;
+ }
if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
input_event(wd->input, EV_KEY, BTN_LEFT,
!!(data[1] & 0x01));
input_event(wd->input, EV_KEY, BTN_RIGHT,
!!(data[1] & 0x02));
input_sync(wd->input);
+ return 0;
} else {
if (size < 21)
return 1;
return wtp_mouse_raw_xy_event(hidpp, &data[7]);
}
case REPORT_ID_HIDPP_LONG:
+ /* size is already checked in hidpp_raw_event. */
if ((report->fap.feature_index != wd->mt_feature_index) ||
(report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
return 1;
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1a07e07d99a0..47d7e74231e5 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -35,6 +35,8 @@ static struct class *pyra_class;
static void profile_activated(struct pyra_device *pyra,
unsigned int new_profile)
{
+ if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return;
pyra->actual_profile = new_profile;
pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
}
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
if (off != 0 || count != PYRA_SIZE_SETTINGS)
return -EINVAL;
- mutex_lock(&pyra->pyra_lock);
-
settings = (struct pyra_settings const *)buf;
+ if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return -EINVAL;
+
+ mutex_lock(&pyra->pyra_lock);
retval = pyra_set_settings(usb_dev, settings);
if (retval) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d32037cbf9db..d43e967e7533 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
static void i2c_hid_stop(struct hid_device *hid)
{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
hid->claimed = 0;
-
- i2c_hid_free_buffers(ihid);
}
static int i2c_hid_open(struct hid_device *hid)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dc89be90b35e..b27b3d33ebab 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 8afa28e4570e..18d4b2c8fe55 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,6 +28,13 @@
#include <linux/cdev.h>
#include "input-compat.h"
+enum evdev_clock_type {
+ EV_CLK_REAL = 0,
+ EV_CLK_MONO,
+ EV_CLK_BOOT,
+ EV_CLK_MAX
+};
+
struct evdev {
int open;
struct input_handle handle;
@@ -49,12 +56,32 @@ struct evdev_client {
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
- int clkid;
+ int clk_type;
bool revoked;
unsigned int bufsize;
struct input_event buffer[];
};
+static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
+{
+ switch (clkid) {
+
+ case CLOCK_REALTIME:
+ client->clk_type = EV_CLK_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ client->clk_type = EV_CLK_MONO;
+ break;
+ case CLOCK_BOOTTIME:
+ client->clk_type = EV_CLK_BOOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
@@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
struct input_event ev;
ktime_t time;
- time = (client->clkid == CLOCK_MONOTONIC) ?
- ktime_get() : ktime_get_real();
+ time = client->clk_type == EV_CLK_REAL ?
+ ktime_get_real() :
+ client->clk_type == EV_CLK_MONO ?
+ ktime_get() :
+ ktime_get_boottime();
ev.time = ktime_to_timeval(time);
ev.type = EV_SYN;
@@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
static void evdev_pass_values(struct evdev_client *client,
const struct input_value *vals, unsigned int count,
- ktime_t mono, ktime_t real)
+ ktime_t *ev_time)
{
struct evdev *evdev = client->evdev;
const struct input_value *v;
@@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
if (client->revoked)
return;
- event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
- mono : real);
+ event.time = ktime_to_timeval(ev_time[client->clk_type]);
/* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
@@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
{
struct evdev *evdev = handle->private;
struct evdev_client *client;
- ktime_t time_mono, time_real;
+ ktime_t ev_time[EV_CLK_MAX];
- time_mono = ktime_get();
- time_real = ktime_mono_to_real(time_mono);
+ ev_time[EV_CLK_MONO] = ktime_get();
+ ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+ ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+ TK_OFFS_BOOT);
rcu_read_lock();
client = rcu_dereference(evdev->grab);
if (client)
- evdev_pass_values(client, vals, count, time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
else
list_for_each_entry_rcu(client, &evdev->client_list, node)
- evdev_pass_values(client, vals, count,
- time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
rcu_read_unlock();
}
@@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
case EVIOCSCLOCKID:
if (copy_from_user(&i, p, sizeof(unsigned int)))
return -EFAULT;
- if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
- return -EINVAL;
- client->clkid = i;
- return 0;
+
+ return evdev_set_clk_type(client, i);
case EVIOCGKEYCODE:
return evdev_handle_get_keycode(dev, p);
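[Editorial sketch, not part of the patch] The evdev hunks above extend EVIOCSCLOCKID to accept CLOCK_BOOTTIME in addition to CLOCK_REALTIME and CLOCK_MONOTONIC. As a hedged illustration of what that enables from userspace, this standalone program selects boot-time timestamps on an event node; the device path is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int clkid = CLOCK_BOOTTIME;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Events read after this carry CLOCK_BOOTTIME timestamps. */
	if (ioctl(fd, EVIOCSCLOCKID, &clkid) < 0)
		perror("EVIOCSCLOCKID");

	close(fd);
	return 0;
}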
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 04217c2e345c..213e3a1903ee 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
- for (i = 0; i < ABS_CNT; i++) {
- if (test_bit(i, dev->absbit)) {
- if (input_is_mt_axis(i))
- events += mt_slots;
- else
- events++;
+ if (test_bit(EV_ABS, dev->evbit)) {
+ for (i = 0; i < ABS_CNT; i++) {
+ if (test_bit(i, dev->absbit)) {
+ if (input_is_mt_axis(i))
+ events += mt_slots;
+ else
+ events++;
+ }
}
}
- for (i = 0; i < REL_CNT; i++)
- if (test_bit(i, dev->relbit))
- events++;
+ if (test_bit(EV_REL, dev->evbit)) {
+ for (i = 0; i < REL_CNT; i++)
+ if (test_bit(i, dev->relbit))
+ events++;
+ }
/* Make room for KEY and MSC events */
events += 7;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 96ee26c555e0..a5d9b3f3c871 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
config KEYBOARD_STMPE
tristate "STMPE keypad support"
depends on MFD_STMPE
+ depends on OF
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on STMPE I/O
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index d4dd78a7d56b..883d6aed5b9a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -35,9 +35,13 @@
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
- struct timer_list timer;
- struct work_struct work;
- unsigned int timer_debounce; /* in msecs */
+
+ struct timer_list release_timer;
+ unsigned int release_delay; /* in msecs, for IRQ-only buttons */
+
+ struct delayed_work work;
+ unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
+
unsigned int irq;
spinlock_t lock;
bool disabled;
@@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
{
if (!bdata->disabled) {
/*
- * Disable IRQ and possible debouncing timer.
+ * Disable IRQ and associated timer/work structure.
*/
disable_irq(bdata->irq);
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
+
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
bdata->disabled = true;
}
@@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
static void gpio_keys_gpio_work_func(struct work_struct *work)
{
struct gpio_button_data *bdata =
- container_of(work, struct gpio_button_data, work);
+ container_of(work, struct gpio_button_data, work.work);
gpio_keys_gpio_report_event(bdata);
@@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
pm_relax(bdata->input->dev.parent);
}
-static void gpio_keys_gpio_timer(unsigned long _data)
-{
- struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
-
- schedule_work(&bdata->work);
-}
-
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
@@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
if (bdata->button->wakeup)
pm_stay_awake(bdata->input->dev.parent);
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
- else
- schedule_work(&bdata->work);
+
+ mod_delayed_work(system_wq,
+ &bdata->work,
+ msecs_to_jiffies(bdata->software_debounce));
return IRQ_HANDLED;
}
@@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
input_event(input, EV_KEY, button->code, 1);
input_sync(input);
- if (!bdata->timer_debounce) {
+ if (!bdata->release_delay) {
input_event(input, EV_KEY, button->code, 0);
input_sync(input);
goto out;
@@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
bdata->key_pressed = true;
}
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
+ if (bdata->release_delay)
+ mod_timer(&bdata->release_timer,
+ jiffies + msecs_to_jiffies(bdata->release_delay));
out:
spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
@@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
{
struct gpio_button_data *bdata = data;
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
-
- cancel_work_sync(&bdata->work);
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
}
static int gpio_keys_setup_key(struct platform_device *pdev,
@@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
- bdata->timer_debounce =
+ bdata->software_debounce =
button->debounce_interval;
}
- irq = gpio_to_irq(button->gpio);
- if (irq < 0) {
- error = irq;
- dev_err(dev,
- "Unable to get irq number for GPIO %d, error %d\n",
- button->gpio, error);
- return error;
+ if (button->irq) {
+ bdata->irq = button->irq;
+ } else {
+ irq = gpio_to_irq(button->gpio);
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev,
+ "Unable to get irq number for GPIO %d, error %d\n",
+ button->gpio, error);
+ return error;
+ }
+ bdata->irq = irq;
}
- bdata->irq = irq;
- INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
- setup_timer(&bdata->timer,
- gpio_keys_gpio_timer, (unsigned long)bdata);
+ INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
isr = gpio_keys_gpio_isr;
irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
return -EINVAL;
}
- bdata->timer_debounce = button->debounce_interval;
- setup_timer(&bdata->timer,
+ bdata->release_delay = button->debounce_interval;
+ setup_timer(&bdata->release_timer,
gpio_keys_irq_timer, (unsigned long)bdata);
isr = gpio_keys_irq_isr;
@@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
input_set_capability(input, button->type ?: EV_KEY, button->code);
/*
- * Install custom action to cancel debounce timer and
+ * Install custom action to cancel release timer and
* workqueue item.
*/
error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
- int gpio = -1;
enum of_gpio_flags flags;
button = &pdata->buttons[i++];
- if (!of_find_property(pp, "gpios", NULL)) {
- button->irq = irq_of_parse_and_map(pp, 0);
- if (button->irq == 0) {
- i--;
- pdata->nbuttons--;
- dev_warn(dev, "Found button without gpios or irqs\n");
- continue;
- }
- } else {
- gpio = of_get_gpio_flags(pp, 0, &flags);
- if (gpio < 0) {
- error = gpio;
+ button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (button->gpio < 0) {
+ error = button->gpio;
+ if (error != -ENOENT) {
if (error != -EPROBE_DEFER)
dev_err(dev,
"Failed to get gpio flags, error: %d\n",
error);
return ERR_PTR(error);
}
+ } else {
+ button->active_low = flags & OF_GPIO_ACTIVE_LOW;
}
- button->gpio = gpio;
- button->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ button->irq = irq_of_parse_and_map(pp, 0);
+
+ if (!gpio_is_valid(button->gpio) && !button->irq) {
+ dev_err(dev, "Found button without gpios or irqs\n");
+ return ERR_PTR(-EINVAL);
+ }
if (of_property_read_u32(pp, "linux,code", &button->code)) {
dev_err(dev, "Button without keycode: 0x%x\n",
@@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+ button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+
if (of_property_read_u32(pp, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
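[Editorial sketch, not part of the patch] The gpio_keys hunks above replace the timer-plus-work pair with a single delayed work item for GPIO-driven buttons. A minimal sketch of that debounce scheme, assuming placeholder names (my_button, my_isr, my_work_fn): the ISR only re-arms the delayed work, so bounces arriving within the debounce window keep pushing the report out until the line is stable.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

struct my_button {
	struct delayed_work work;
	unsigned int software_debounce;	/* in msecs */
};

static void my_work_fn(struct work_struct *work)
{
	struct my_button *b = container_of(work, struct my_button, work.work);

	/* Read the now-stable GPIO level and report the key event here. */
	(void)b;
}

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_button *b = dev_id;

	/* Re-arm the delayed work; each bounce pushes the deadline back. */
	mod_delayed_work(system_wq, &b->work,
			 msecs_to_jiffies(b->software_debounce));
	return IRQ_HANDLED;
}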
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 610a8af795a1..5b152f25a8e1 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ef5e67fb567e..fe6e3f22eed7 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -45,13 +45,14 @@
#define STMPE_KEYPAD_MAX_ROWS 8
#define STMPE_KEYPAD_MAX_COLS 8
#define STMPE_KEYPAD_ROW_SHIFT 3
-#define STMPE_KEYPAD_KEYMAP_SIZE \
+#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
(STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
/**
* struct stmpe_keypad_variant - model-specific attributes
* @auto_increment: whether the KPC_DATA_BYTE register address
* auto-increments on multiple read
+ * @set_pullup: whether the pins need to have their pull-ups set
* @num_data: number of data bytes
* @num_normal_data: number of normal keys' data bytes
* @max_cols: maximum number of columns supported
@@ -61,6 +62,7 @@
*/
struct stmpe_keypad_variant {
bool auto_increment;
+ bool set_pullup;
int num_data;
int num_normal_data;
int max_cols;
@@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2401] = {
.auto_increment = false,
+ .set_pullup = true,
.num_data = 3,
.num_normal_data = 2,
.max_cols = 8,
@@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2403] = {
.auto_increment = true,
+ .set_pullup = true,
.num_data = 5,
.num_normal_data = 3,
.max_cols = 8,
@@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
};
+/**
+ * struct stmpe_keypad - STMPE keypad state container
+ * @stmpe: pointer to parent STMPE device
+ * @input: spawned input device
+ * @variant: STMPE variant
+ * @debounce_ms: debounce interval, in ms. Maximum is
+ * %STMPE_KEYPAD_MAX_DEBOUNCE.
+ * @scan_count: number of key scanning cycles to confirm key data.
+ * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
+ * @no_autorepeat: disable key autorepeat
+ * @rows: bitmask for the rows
+ * @cols: bitmask for the columns
+ * @keymap: the keymap
+ */
struct stmpe_keypad {
struct stmpe *stmpe;
struct input_dev *input;
const struct stmpe_keypad_variant *variant;
- const struct stmpe_keypad_platform_data *plat;
-
+ unsigned int debounce_ms;
+ unsigned int scan_count;
+ bool no_autorepeat;
unsigned int rows;
unsigned int cols;
-
- unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
+ unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
};
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
unsigned int col_gpios = variant->col_gpios;
unsigned int row_gpios = variant->row_gpios;
struct stmpe *stmpe = keypad->stmpe;
+ u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
unsigned int pins = 0;
+ unsigned int pu_pins = 0;
+ int ret;
int i;
/*
@@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
for (i = 0; i < variant->max_cols; i++) {
int num = __ffs(col_gpios);
- if (keypad->cols & (1 << i))
+ if (keypad->cols & (1 << i)) {
pins |= 1 << num;
+ pu_pins |= 1 << num;
+ }
col_gpios &= ~(1 << num);
}
@@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
row_gpios &= ~(1 << num);
}
- return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ if (ret)
+ return ret;
+
+ /*
+ * On STMPE24xx, set pin bias to pull-up on all keypad input
+	 * pins (columns); these incidentally happen to be at most 8 pins,
+	 * placed at GPIO0-7, so only the LSB of the pull-up register
+ * ever needs to be written.
+ */
+ if (variant->set_pullup) {
+ u8 val;
+
+ ret = stmpe_reg_read(stmpe, pureg);
+ if (ret)
+ return ret;
+
+		/* Do not touch unused pins; they may be used for GPIO */
+ val = ret & ~pu_pins;
+ val |= pu_pins;
+
+ ret = stmpe_reg_write(stmpe, pureg, val);
+ }
+
+ return 0;
}
static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
- const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
struct stmpe *stmpe = keypad->stmpe;
int ret;
- if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
+ if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
return -EINVAL;
- if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
+ if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
return -EINVAL;
ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
STMPE_KPC_CTRL_MSB_SCAN_COUNT,
- plat->scan_count << 4);
+ keypad->scan_count << 4);
if (ret < 0)
return ret;
@@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
STMPE_KPC_CTRL_LSB_SCAN |
STMPE_KPC_CTRL_LSB_DEBOUNCE,
STMPE_KPC_CTRL_LSB_SCAN |
- (plat->debounce_ms << 1));
+ (keypad->debounce_ms << 1));
}
-static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
+ u32 used_rows, u32 used_cols)
{
int row, col;
- for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
- for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+ for (row = 0; row < used_rows; row++) {
+ for (col = 0; col < used_cols; col++) {
int code = MATRIX_SCAN_CODE(row, col,
- STMPE_KEYPAD_ROW_SHIFT);
+ STMPE_KEYPAD_ROW_SHIFT);
if (keypad->keymap[code] != KEY_RESERVED) {
keypad->rows |= 1 << row;
keypad->cols |= 1 << col;
@@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
}
}
-#ifdef CONFIG_OF
-static const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct stmpe_keypad_platform_data *plat;
-
- if (!np)
- return ERR_PTR(-ENODEV);
-
- plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
- if (!plat)
- return ERR_PTR(-ENOMEM);
-
- of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
- of_property_read_u32(np, "st,scan-count", &plat->scan_count);
-
- plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
-
- return plat;
-}
-#else
-static inline const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
-
static int stmpe_keypad_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- const struct stmpe_keypad_platform_data *plat;
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_keypad *keypad;
struct input_dev *input;
+ u32 rows;
+ u32 cols;
int error;
int irq;
- plat = stmpe->pdata->keypad;
- if (!plat) {
- plat = stmpe_keypad_of_probe(&pdev->dev);
- if (IS_ERR(plat))
- return PTR_ERR(plat);
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
+ keypad->stmpe = stmpe;
+ keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
+
+ of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
+ of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
+ keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
input = devm_input_allocate_device(&pdev->dev);
if (!input)
return -ENOMEM;
@@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- STMPE_KEYPAD_MAX_ROWS,
- STMPE_KEYPAD_MAX_COLS,
+ error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
+ if (error)
+ return error;
+
+ error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
keypad->keymap, input);
if (error)
return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
- if (!plat->no_autorepeat)
+ if (!keypad->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- stmpe_keypad_fill_used_pins(keypad);
+ stmpe_keypad_fill_used_pins(keypad, rows, cols);
- keypad->stmpe = stmpe;
- keypad->plat = plat;
keypad->input = input;
- keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
error = stmpe_keypad_chip_init(keypad);
if (error < 0)
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d125a019383f..d88d73d83552 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
unsigned char *pkt,
unsigned char pkt_id)
{
+ /*
+ * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
+ * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
+ * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
+ * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
+ * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
+ * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
+ * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
+ * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
+ * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
+ * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
+ * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
+ * L: Left button
+ * R / M: Non-clickpads: Right / Middle button
+ * Clickpads: When > 2 fingers are down, and some fingers
+ * are in the button area, then the 2 coordinates reported
+ * are for fingers outside the button area and these report
+ * extra fingers being present in the right / left button
+ * area. Note these fingers are not added to the F field!
+ * so if a TWO packet is received and R = 1 then there are
+ * 3 fingers down, etc.
+ * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
+ * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+ * otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+ * in NEW fmt
+ * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+ */
+
mt[0].x = ((pkt[2] & 0x80) << 4);
mt[0].x |= ((pkt[2] & 0x3F) << 5);
mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
static int alps_get_mt_count(struct input_mt_pos *mt)
{
- int i;
+ int i, fingers = 0;
- for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
- /* empty */;
+ for (i = 0; i < MAX_TOUCHES; i++) {
+ if (mt[i].x != 0 || mt[i].y != 0)
+ fingers++;
+ }
- return i;
+ return fingers;
}
static int alps_decode_packet_v7(struct alps_fields *f,
unsigned char *p,
struct psmouse *psmouse)
{
+ struct alps_data *priv = psmouse->private;
unsigned char pkt_id;
pkt_id = alps_get_packet_id_v7(p);
@@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
return 0;
if (pkt_id == V7_PACKET_ID_UNKNOWN)
return -1;
+ /*
+	 * NEW packets are sent to indicate a discontinuity in the finger
+	 * coordinate reporting. Specifically, a finger may have moved from
+ * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
+ * us.
+ *
+ * NEW packets have 3 problems:
+	 * 1) They do not contain middle / right button info (on non-clickpads);
+	 *    this can be worked around by preserving the old button state.
+	 * 2) They do not contain an accurate finger count, and they are
+	 *    typically sent when the number of fingers changes. We cannot use
+	 *    the old finger count as that may mismatch with the number of
+	 *    touch coordinates available in the NEW packet.
+	 * 3) Their x data for the second touch is inaccurate, leading to
+	 *    a possible jump of the x coordinate by 16 units when the first
+	 *    non-NEW packet comes in.
+ * Since problems 2 & 3 cannot be worked around, just ignore them.
+ */
+ if (pkt_id == V7_PACKET_ID_NEW)
+ return 1;
alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
- if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
- f->left = (p[0] & 0x80) >> 7;
+ if (pkt_id == V7_PACKET_ID_TWO)
+ f->fingers = alps_get_mt_count(f->mt);
+ else /* pkt_id == V7_PACKET_ID_MULTI */
+ f->fingers = 3 + (p[5] & 0x03);
+
+ f->left = (p[0] & 0x80) >> 7;
+ if (priv->flags & ALPS_BUTTONPAD) {
+ if (p[0] & 0x20)
+ f->fingers++;
+ if (p[0] & 0x10)
+ f->fingers++;
+ } else {
f->right = (p[0] & 0x20) >> 5;
f->middle = (p[0] & 0x10) >> 4;
}
- if (pkt_id == V7_PACKET_ID_TWO)
- f->fingers = alps_get_mt_count(f->mt);
- else if (pkt_id == V7_PACKET_ID_MULTI)
- f->fingers = 3 + (p[5] & 0x03);
+	/* Sometimes a single touch is reported in mt[1] rather than mt[0] */
+ if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
+ f->mt[0].x = f->mt[1].x;
+ f->mt[0].y = f->mt[1].y;
+ f->mt[1].x = 0;
+ f->mt[1].y = 0;
+ }
return 0;
}
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 30c8b6998808..354d47ecd66a 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
+TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
TP_DEF_PTSON);
@@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_upthresh.dattr.attr,
&psmouse_attr_ztime.dattr.attr,
&psmouse_attr_jenks.dattr.attr,
+ &psmouse_attr_drift_time.dattr.attr,
&psmouse_attr_press_to_select.dattr.attr,
&psmouse_attr_skipback.dattr.attr,
&psmouse_attr_ext_dev.dattr.attr,
@@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
+ TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
/* toggles */
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
+ TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
/* toggles */
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index ecd0547964a5..5617ed3a7d7a 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -70,6 +70,9 @@
#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */
#define TP_Z_TIME 0x5E /* How sharp of a press */
#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */
+#define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */
+ /* must last (x*107ms) for drift */
+ /* correction to occur */
/*
* Toggling Flag bits
@@ -120,6 +123,7 @@
#define TP_DEF_UP_THRESH 0xFF
#define TP_DEF_Z_TIME 0x26
#define TP_DEF_JENKS_CURV 0x87
+#define TP_DEF_DRIFT_TIME 0x05
/* Toggles */
#define TP_DEF_MB 0x00
@@ -137,6 +141,7 @@ struct trackpoint_data
unsigned char draghys, mindrag;
unsigned char thresh, upthresh;
unsigned char ztime, jenks;
+ unsigned char drift_time;
/* toggles */
unsigned char press_to_select;
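As an illustration of the TP_DRIFT_TIME units above (not part of the patch): the register value is a multiple of 107 ms, so the new default TP_DEF_DRIFT_TIME of 0x05 works out to about 5 * 107 = 535 ms of 'hands off' time before drift correction runs. A minimal sketch, assuming only that unit:

/* Sketch only: convert a TP_DRIFT_TIME register value to milliseconds,
 * assuming the 107 ms unit noted in the comment above.
 */
static inline unsigned int tp_drift_time_to_ms(unsigned char drift_time)
{
	return drift_time * 107;	/* e.g. 0x05 -> 535 ms */
}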
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index bb070206223c..95ee92a91bd2 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -99,13 +99,9 @@
#define MXT_T6_STATUS_COMSERR (1 << 2)
/* MXT_GEN_POWER_T7 field */
-struct t7_config {
- u8 idle;
- u8 active;
-} __packed;
-
-#define MXT_POWER_CFG_RUN 0
-#define MXT_POWER_CFG_DEEPSLEEP 1
+#define MXT_POWER_IDLEACQINT 0
+#define MXT_POWER_ACTVACQINT 1
+#define MXT_POWER_ACTV2IDLETO 2
/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0
@@ -117,6 +113,7 @@ struct t7_config {
#define MXT_ACQUIRE_ATCHCALSTHR 7
/* MXT_TOUCH_MULTI_T9 field */
+#define MXT_TOUCH_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@@ -256,7 +253,6 @@ struct mxt_data {
bool update_input;
u8 last_message_count;
u8 num_touchids;
- struct t7_config t7_cfg;
/* Cached parameters from object table */
u16 T5_address;
@@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
data->t6_status = status;
}
+static int mxt_write_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object || offset >= mxt_obj_size(object))
+ return -EINVAL;
+
+ reg = object->start_address;
+ return mxt_write_reg(data->client, reg + offset, val);
+}
+
static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
@@ -1742,60 +1752,6 @@ err_free_object_table:
return error;
}
-static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
-{
- struct device *dev = &data->client->dev;
- int error;
- struct t7_config *new_config;
- struct t7_config deepsleep = { .active = 0, .idle = 0 };
-
- if (sleep == MXT_POWER_CFG_DEEPSLEEP)
- new_config = &deepsleep;
- else
- new_config = &data->t7_cfg;
-
- error = __mxt_write_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), new_config);
- if (error)
- return error;
-
- dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
- new_config->active, new_config->idle);
-
- return 0;
-}
-
-static int mxt_init_t7_power_cfg(struct mxt_data *data)
-{
- struct device *dev = &data->client->dev;
- int error;
- bool retry = false;
-
-recheck:
- error = __mxt_read_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), &data->t7_cfg);
- if (error)
- return error;
-
- if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
- if (!retry) {
- dev_dbg(dev, "T7 cfg zero, resetting\n");
- mxt_soft_reset(data);
- retry = true;
- goto recheck;
- } else {
- dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
- data->t7_cfg.active = 20;
- data->t7_cfg.idle = 100;
- return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
- }
- }
-
- dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
- data->t7_cfg.active, data->t7_cfg.idle);
- return 0;
-}
-
static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
@@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
dev_warn(dev, "Error %d updating config\n", error);
}
- error = mxt_init_t7_power_cfg(data);
- if (error) {
- dev_err(dev, "Failed to initialize power cfg\n");
- return error;
- }
-
error = mxt_initialize_t9_input_device(data);
if (error)
return error;
@@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-
- /* Recalibrate since chip has been in deep sleep */
- mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+ /* Touch enable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
}
static void mxt_stop(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+ /* Touch disable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
}
static int mxt_input_open(struct input_dev *dev)
@@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
+ mxt_soft_reset(data);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3793fcc7e5db..d4c24fb7704f 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
}
#define EDT_ATTR_CHECKSET(name, reg) \
+do { \
if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
- edt_ft5x06_register_write(tsdata, reg, pdata->name)
+ edt_ft5x06_register_write(tsdata, reg, pdata->name); \
+} while (0)
#define EDT_GET_PROP(name, reg) { \
u32 val; \
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1232336b960e..40dfbc0444c0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
if (action != BUS_NOTIFY_REMOVED_DEVICE)
return 0;
- /*
- * If the device is still attached to a device driver we can't
- * tear down the domain yet as DMA mappings may still be in use.
- * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
- */
- if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
- return 0;
-
domain = find_domain(dev);
if (!domain)
return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
domain_remove_one_dev_info(old_domain, dev);
else
domain_remove_dev_info(old_domain);
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
}
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 68dfb0fd5ee9..748693192c20 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
static u64 ipmmu_page_prot(unsigned int prot, u64 type)
{
- u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+ u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
| ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
| ARM_VMSA_PTE_NS | type;
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
if (prot & IOMMU_CACHE)
pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
- if (prot & IOMMU_EXEC)
- pgprot &= ~ARM_VMSA_PTE_XN;
+ if (prot & IOMMU_NOEXEC)
+ pgprot |= ARM_VMSA_PTE_XN;
else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
/* If no access create a faulting entry to avoid TLB fills. */
pgprot &= ~ARM_VMSA_PTE_PAGE;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b2023af384b9..6a8b1ec4a48a 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
.remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rk_iommu_dt_ids),
},
};
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 8735543eacdb..493478989dbd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+static void check_for_space(struct pool *pool)
+{
+ int r;
+ dm_block_t nr_free;
+
+ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
+ return;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
+ if (r)
+ return;
+
+ if (nr_free)
+ set_pool_mode(pool, PM_WRITE);
+}
+
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
@@ -1141,6 +1159,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+ else
+ check_for_space(pool);
return r;
}
@@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
}
}
-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
@@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_cell = process_cell_read_only;
pool->process_discard_cell = process_discard_cell;
pool->process_prepared_mapping = process_prepared_mapping;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard;
if (!pool->pf.error_if_no_space && no_space_timeout)
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3814,6 +3832,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto bad;
}
+ atomic_set(&tc->refcount, 1);
+ init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
@@ -3826,9 +3846,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_put(pool_md);
- atomic_set(&tc->refcount, 1);
- init_completion(&tc->can_destroy);
-
return 0;
bad:
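The thin_ctr reordering above initializes the refcount and completion before the device is added to the RCU-visible active_thins list, following the usual publish-after-init rule: an object must be fully set up before concurrent readers can find it. A generic sketch of that pattern, with hypothetical names:

/* Hypothetical object and publish helper illustrating the ordering. */
struct item {
	atomic_t refcount;
	struct completion done;
	struct list_head list;
};

static void publish_item(struct item *it, struct list_head *head,
			 spinlock_t *lock)
{
	unsigned long flags;

	atomic_set(&it->refcount, 1);	/* initialize first ... */
	init_completion(&it->done);

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(&it->list, head);	/* ... then make it visible */
	spin_unlock_irqrestore(lock, flags);
}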
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4c06585bf165..b98cd9d84435 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -899,7 +899,7 @@ static void disable_write_same(struct mapped_device *md)
static void clone_endio(struct bio *bio, int error)
{
- int r = 0;
+ int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index e2f9df1c0c36..2d7fae94c861 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
[STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
[STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
[STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
};
@@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
+ [STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index bee0abf82040..84adb46b3e2f 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -188,6 +188,7 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE1601_REG_GPIO_ED_MSB 0x8A
#define STMPE1601_REG_GPIO_RE_LSB 0x8D
#define STMPE1601_REG_GPIO_FE_LSB 0x8F
+#define STMPE1601_REG_GPIO_PU_LSB 0x91
#define STMPE1601_REG_GPIO_AF_U_MSB 0x92
#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3)
@@ -276,6 +277,8 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE24XX_REG_GPEDR_MSB 0x8C
#define STMPE24XX_REG_GPRER_LSB 0x91
#define STMPE24XX_REG_GPFER_LSB 0x94
+#define STMPE24XX_REG_GPPUR_LSB 0x97
+#define STMPE24XX_REG_GPPDR_LSB 0x9a
#define STMPE24XX_REG_GPAFR_U_MSB 0x9B
#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 02ad79229f65..7466ce098e60 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -886,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
unsigned idx, bus_width = 0;
int err = 0;
- if (!mmc_can_ext_csd(card) &&
+ if (!mmc_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
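To spell out the mmc_select_bus_width change above: the switch needs both an EXT_CSD on the card and a host that supports a wide bus, so the early return must fire when either precondition fails; the old '&&' only fired when both failed. By De Morgan, the fixed test is equivalent to this sketch:

/* Illustrative restatement of the fixed precondition check. */
bool card_ok = mmc_can_ext_csd(card);
bool host_ok = host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);

if (!(card_ok && host_ok))	/* i.e. !card_ok || !host_ok */
	return 0;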
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 184c434ae305..0dceba1a2ba1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1648,7 +1648,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- netdev_err(bond_dev, "cannot release %s\n",
+ netdev_dbg(bond_dev, "cannot release %s\n",
slave_dev->name);
return -EINVAL;
}
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index a5fefb9059c5..b306210b02b7 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -257,7 +257,6 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
struct vringh_kiov *riov = &cfv->ctx.riov;
unsigned int skb_len;
-again:
do {
skb = NULL;
@@ -322,7 +321,6 @@ exit:
napi_schedule_prep(napi)) {
vringh_notify_disable_kern(cfv->vr_rx);
__napi_schedule(napi);
- goto again;
}
break;
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 89c8d9fc97de..57e97910c728 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -246,13 +246,13 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
- return -ENODEV;
+ goto err_out;
}
if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
- return -EBUSY;
+ goto err_out;
}
reg0 = inb(ioaddr);
@@ -392,6 +392,8 @@ err_out_free_netdev:
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
+err_out:
+ pci_disable_device(pdev);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index df76050d0a9d..eadcb053807e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -156,18 +156,6 @@ source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
-
-config S6GMAC
- tristate "S6105 GMAC ethernet support"
- depends on XTENSA_VARIANT_S6000
- select PHYLIB
- ---help---
- This driver supports the on chip ethernet device on the
- S6105 xtensa processor.
-
- To compile this driver as a module, choose M here. The module
- will be called s6gmac.
-
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index bf56f8b36e90..1367afcd0a8b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -66,7 +66,6 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
-obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 1fcd5568a352..f3470d96837a 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
}
db->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(db->clk))
+ if (IS_ERR(db->clk)) {
+ ret = PTR_ERR(db->clk);
goto out;
+ }
clk_prepare_enable(db->clk);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3498760dc22a..760c72c6e2ac 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
init_error:
free_skbufs(dev);
alloc_skbuf_error:
- if (priv->phydev) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
phy_error:
return ret;
}
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
int ret;
unsigned long int flags;
- /* Stop and disconnect the PHY */
- if (priv->phydev) {
+ /* Stop the PHY */
+ if (priv->phydev)
phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9f5e38769a29..72eef9fc883e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12553,9 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev,
return 0;
}
-static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
static const struct net_device_ops bnx2x_netdev_ops = {
@@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
- .ndo_gso_check = bnx2x_gso_check,
+ .ndo_features_check = bnx2x_features_check,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bb48a610b72a..553dcd8a9df2 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17800,23 +17800,6 @@ static int tg3_init_one(struct pci_dev *pdev,
goto err_out_apeunmap;
}
- /*
- * Reset chip in case UNDI or EFI driver did not shutdown
- * DMA self test will enable WDMAC and we'll see (spurious)
- * pending DMA on the PCI bus at that point.
- */
- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- }
-
- err = tg3_test_dma(tp);
- if (err) {
- dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
- goto err_out_apeunmap;
- }
-
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -17861,6 +17844,23 @@ static int tg3_init_one(struct pci_dev *pdev,
sndmbx += 0xc;
}
+ /*
+ * Reset chip in case UNDI or EFI driver did not shutdown
+ * DMA self test will enable WDMAC and we'll see (spurious)
+ * pending DMA on the PCI bus at that point.
+ */
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ }
+
+ err = tg3_test_dma(tp);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+ goto err_out_apeunmap;
+ }
+
tg3_init_coal(tp);
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7d6aa8c87df8..619083a860a4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -172,7 +172,7 @@ bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
/* Retrieve flash partition info */
fcomp.comp_status = 0;
- init_completion(&fcomp.comp);
+ reinit_completion(&fcomp.comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
bnad_cb_completion, &fcomp);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index d00a751f0588..6049f70e110c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -96,6 +96,9 @@ struct port_info {
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
+ s8 mdio_addr;
+ u8 port_type; /* firmware port type */
+ u8 mod_type; /* firmware module type */
u8 port_id; /* physical port ID */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
@@ -522,6 +525,7 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
* is "contracted" to provide for the common code.
*/
void t4vf_os_link_changed(struct adapter *, int, int);
+void t4vf_os_portmod_changed(struct adapter *, int);
/*
* SGE function prototype declarations.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index aa74ec34a467..2215d432a059 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -44,6 +44,7 @@
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include "t4vf_common.h"
#include "t4vf_defs.h"
@@ -210,6 +211,38 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
}
/*
+ * The port module type has changed on the indicated "port" (Virtual
+ * Interface).
+ */
+void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
+{
+ static const char * const mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+ const struct net_device *dev = adapter->port[pidx];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
+ dev->name);
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
+ dev->name, mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ dev_info(adapter->pdev_dev, "%s: unsupported optical port "
+ "module inserted\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
+ "forcing TWINAX\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
+ dev->name);
+ else
+ dev_info(adapter->pdev_dev, "%s: unknown module type %d "
+ "inserted\n", dev->name, pi->mod_type);
+}
+
+/*
* Net device operations.
* ======================
*/
@@ -1193,24 +1226,103 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
* state of the port to which we're linked.
*/
-/*
- * Return current port link settings.
- */
-static int cxgb4vf_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- const struct port_info *pi = netdev_priv(dev);
+static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
+ unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI ||
+ type == FW_PORT_TYPE_SFP ||
+ type == FW_PORT_TYPE_QSFP_10G ||
+ type == FW_PORT_TYPE_QSA) {
+ v |= SUPPORTED_FIBRE;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_BP40_BA ||
+ type == FW_PORT_TYPE_QSFP) {
+ v |= SUPPORTED_40000baseSR4_Full;
+ v |= SUPPORTED_FIBRE;
+ }
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI)
+ cmd->port = PORT_TP;
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+ cmd->port = PORT_FIBRE;
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSA ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_OTHER;
+ } else
+ cmd->port = PORT_OTHER;
- cmd->supported = pi->link_cfg.supported;
- cmd->advertising = pi->link_cfg.advertising;
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.supported);
+ cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
cmd->duplex = DUPLEX_FULL;
-
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = pi->port_id;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = pi->link_cfg.autoneg;
+ cmd->autoneg = p->link_cfg.autoneg;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8d3237f5e364..b9debb4f29a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -230,7 +230,7 @@ struct adapter_params {
static inline bool is_10g_port(const struct link_config *lc)
{
- return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 02e8833b7797..21dc9a20308c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -245,6 +245,10 @@ static int hash_mac_addr(const u8 *addr)
return a & 0x3f;
}
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
@@ -259,8 +263,8 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
- if (lc->supported & SUPPORTED_Autoneg) {
- lc->advertising = lc->supported;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
lc->requested_fc |= PAUSE_AUTONEG;
} else {
@@ -280,7 +284,6 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
struct fw_vi_cmd vi_cmd, vi_rpl;
struct fw_port_cmd port_cmd, port_rpl;
int v;
- u32 word;
/*
* Execute a VI Read command to get our Virtual Interface information
@@ -319,19 +322,11 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
if (v)
return v;
- v = 0;
- word = be16_to_cpu(port_rpl.u.info.pcap);
- if (word & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (word & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_40G)
- v |= SUPPORTED_40000baseSR4_Full;
- if (word & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- init_link_config(&pi->link_cfg, v);
+ v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ pi->port_type = FW_PORT_CMD_PTYPE_G(v);
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
return 0;
}
@@ -1491,7 +1486,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
*/
const struct fw_port_cmd *port_cmd =
(const struct fw_port_cmd *)rpl;
- u32 word;
+ u32 stat, mod;
int action, port_id, link_ok, speed, fc, pidx;
/*
@@ -1509,21 +1504,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
port_id = FW_PORT_CMD_PORTID_G(
be32_to_cpu(port_cmd->op_to_portid));
- word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
- link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
+ stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+ link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
speed = 0;
fc = 0;
- if (word & FW_PORT_CMD_RXPAUSE_F)
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (word & FW_PORT_CMD_TXPAUSE_F)
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
/*
@@ -1540,12 +1535,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
continue;
lc = &pi->link_cfg;
+
+ mod = FW_PORT_CMD_MODTYPE_G(stat);
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4vf_os_portmod_changed(adapter, pidx);
+ }
+
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) {
/* something changed */
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
+ lc->supported =
+ be16_to_cpu(port_cmd->u.info.pcap);
t4vf_os_link_changed(adapter, pidx, link_ok);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 868d0f605d60..b29e027c476e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1060,10 +1060,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
- skb->csum = htons(checksum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ /* Hardware does not provide the whole packet checksum. It only
+ * provides a pseudo checksum. Since hw validates the packet
+ * checksum but does not provide us the checksum value, use
+ * CHECKSUM_UNNECESSARY.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
+ ipv4_csum_ok)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
@@ -1612,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
- goto err_out_notify_unset;
+ goto err_out_free_rq;
}
}
@@ -1645,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
return 0;
-err_out_notify_unset:
+err_out_free_rq:
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
enic_dev_notify_unset(enic);
err_out_free_intr:
enic_free_intr(enic);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 196073110e32..41a0a5498da7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4459,9 +4459,11 @@ done:
adapter->vxlan_port_count--;
}
-static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t be_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -4492,7 +4494,7 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_BE2NET_VXLAN
.ndo_add_vxlan_port = be_add_vxlan_port,
.ndo_del_vxlan_port = be_del_vxlan_port,
- .ndo_gso_check = be_gso_check,
+ .ndo_features_check = be_features_check,
#endif
};
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 781065eb5431..e9c3a87e5b11 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 433a55886ad2..cb0de455683e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
if (desc_n >= ring->count || desc_n < 0) {
dev_info(&pf->pdev->dev,
"descriptor %d not found\n", desc_n);
- return;
+ goto out;
}
if (!is_rx_ring) {
txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
+
+out:
kfree(ring);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 051ea94bdcd3..0f69ef81751a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = 0;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ s32 i = 0, timeout = 200;
while (i < timeout) {
if (igb_get_hw_semaphore(hw)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 190cbd931f6b..d0d6dc1b8e46 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2365,9 +2365,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
-static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -2400,7 +2402,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
@@ -2434,7 +2436,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a308d41e4de0..e3357bf523df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -962,7 +962,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_desc->ctrl.owner_opcode = op_own;
if (send_doorbell) {
wmb();
- iowrite32(ring->doorbell_qpn,
+ /* Since there is no iowrite*_native() that writes the
+ * value as is, without byteswapping - use the accessor
+ * that does not do byteswapping for the relevant arch
+ * endianness.
+ */
+#if defined(__LITTLE_ENDIAN)
+ iowrite32(
+#else
+ iowrite32be(
+#endif
+ ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
} else {
ring->xmit_more++;
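The #if block above is effectively an open-coded 'native' MMIO write: pick whichever accessor is a plain store on the running architecture, so the doorbell value is written exactly as stored in memory. A hypothetical helper with the same behaviour, not part of the patch:

/* Hypothetical helper: write a 32-bit value to MMIO with no byteswap,
 * mirroring the inline #if above.
 */
static inline void iowrite32_native(u32 val, void __iomem *addr)
{
#if defined(__LITTLE_ENDIAN)
	iowrite32(val, addr);
#else
	iowrite32be(val, addr);
#endif
}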
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 943cbd47d832..03e9eb0dc761 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1829,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
- goto err_stop_fw;
+ return err;
}
choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
&init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
- goto err_stop_fw;
+ return err;
}
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
if (err)
- goto err_stop_fw;
+ return err;
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
@@ -1886,7 +1886,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_query_func(dev, &dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
- goto err_stop_fw;
+ goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
dev->caps.num_eqs = dev_cap.max_eqs;
dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2006,6 @@ err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
-err_stop_fw:
- if (!mlx4_is_slave(dev)) {
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
- }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d6f549685c0f..7094a9c70fd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
mlx4_mtt_cleanup(dev, &mr->mtt);
+ mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
{
int err;
- mpt_entry->start = cpu_to_be64(iova);
- mpt_entry->length = cpu_to_be64(size);
- mpt_entry->entity_size = cpu_to_be32(page_shift);
-
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
return err;
+ mpt_entry->start = cpu_to_be64(mr->iova);
+ mpt_entry->length = cpu_to_be64(mr->size);
+ mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index f1ebed6c63b1..2fa6ae026e4f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -2303,12 +2303,6 @@ static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
/* Spanning Tree */
-static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
-}
-
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index af099057f0e9..71af98bb72cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
- if (mgp->cmd == NULL)
+ if (!mgp->cmd) {
+ status = -ENOMEM;
goto abort_with_enabled;
+ }
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c2f09af5c25b..4847713211ca 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
int i = 0;
- while (i < 10) {
- if (i)
- ssleep(1);
-
+ do {
if (ql_sem_lock(qdev,
QL_DRVR_SEM_MASK,
(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
"driver lock acquired\n");
return 1;
}
- }
+ ssleep(1);
+ } while (++i < 10);
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1aa25b13ace1..2528c3fb6b90 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
-static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#ifdef CONFIG_QLCNIC_VXLAN
.ndo_add_vxlan_port = qlcnic_add_vxlan_port,
.ndo_del_vxlan_port = qlcnic_del_vxlan_port,
- .ndo_gso_check = qlcnic_gso_check,
+ .ndo_features_check = qlcnic_features_check,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
@@ -2603,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
dev_err(&pdev->dev,
"%s: failed. Please Reboot\n", __func__);
+ err = -ENODEV;
goto err_out_free_hw;
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 6d0b9dfac313..78bb4ceb1cdd 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -787,10 +787,10 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
if (rc)
goto err_out;
+ disable_dev_on_err = 1;
rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
goto err_out;
- disable_dev_on_err = 1;
pci_set_master (pdev);
@@ -1110,6 +1110,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
return 0;
err_out:
+ netif_napi_del(&tp->napi);
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
return i;
@@ -1124,6 +1125,7 @@ static void rtl8139_remove_one(struct pci_dev *pdev)
assert (dev != NULL);
cancel_delayed_work_sync(&tp->thread);
+ netif_napi_del(&tp->napi);
unregister_netdev (dev);
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
deleted file mode 100644
index f537cbea20e5..000000000000
--- a/drivers/net/ethernet/s6gmac.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * Ethernet driver for S6105 on chip network device
- * (c)2008 emlix GmbH http://www.emlix.com
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if.h>
-#include <linux/stddef.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <variant/hardware.h>
-#include <variant/dmac.h>
-
-#define DRV_NAME "s6gmac"
-#define DRV_PRMT DRV_NAME ": "
-
-
-/* register declarations */
-
-#define S6_GMAC_MACCONF1 0x000
-#define S6_GMAC_MACCONF1_TXENA 0
-#define S6_GMAC_MACCONF1_SYNCTX 1
-#define S6_GMAC_MACCONF1_RXENA 2
-#define S6_GMAC_MACCONF1_SYNCRX 3
-#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
-#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
-#define S6_GMAC_MACCONF1_LOOPBACK 8
-#define S6_GMAC_MACCONF1_RESTXFUNC 16
-#define S6_GMAC_MACCONF1_RESRXFUNC 17
-#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
-#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
-#define S6_GMAC_MACCONF1_SIMULRES 30
-#define S6_GMAC_MACCONF1_SOFTRES 31
-#define S6_GMAC_MACCONF2 0x004
-#define S6_GMAC_MACCONF2_FULL 0
-#define S6_GMAC_MACCONF2_CRCENA 1
-#define S6_GMAC_MACCONF2_PADCRCENA 2
-#define S6_GMAC_MACCONF2_LENGTHFCHK 4
-#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
-#define S6_GMAC_MACCONF2_IFMODE 8
-#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
-#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
-#define S6_GMAC_MACCONF2_IFMODE_MASK 3
-#define S6_GMAC_MACCONF2_PREAMBLELEN 12
-#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
-#define S6_GMAC_MACIPGIFG 0x008
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
-#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
-#define S6_GMAC_MACHALFDUPLEX 0x00C
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
-#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
-#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
-#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
-#define S6_GMAC_MACMAXFRAMELEN 0x010
-#define S6_GMAC_MACMIICONF 0x020
-#define S6_GMAC_MACMIICONF_CSEL 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
-#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
-#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
-#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
-#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
-#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
-#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
-#define S6_GMAC_MACMIICONF_CSEL_MASK 7
-#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
-#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
-#define S6_GMAC_MACMIICMD 0x024
-#define S6_GMAC_MACMIICMD_READ 0
-#define S6_GMAC_MACMIICMD_SCAN 1
-#define S6_GMAC_MACMIIADDR 0x028
-#define S6_GMAC_MACMIIADDR_REG 0
-#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
-#define S6_GMAC_MACMIIADDR_PHY 8
-#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
-#define S6_GMAC_MACMIICTRL 0x02C
-#define S6_GMAC_MACMIISTAT 0x030
-#define S6_GMAC_MACMIIINDI 0x034
-#define S6_GMAC_MACMIIINDI_BUSY 0
-#define S6_GMAC_MACMIIINDI_SCAN 1
-#define S6_GMAC_MACMIIINDI_INVAL 2
-#define S6_GMAC_MACINTERFSTAT 0x03C
-#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
-#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
-#define S6_GMAC_MACSTATADDR1 0x040
-#define S6_GMAC_MACSTATADDR2 0x044
-
-#define S6_GMAC_FIFOCONF0 0x048
-#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
-#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
-#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
-#define S6_GMAC_FIFOCONF0_HSTRSTST 3
-#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
-#define S6_GMAC_FIFOCONF0_WTMENREQ 8
-#define S6_GMAC_FIFOCONF0_SRFENREQ 9
-#define S6_GMAC_FIFOCONF0_FRFENREQ 10
-#define S6_GMAC_FIFOCONF0_STFENREQ 11
-#define S6_GMAC_FIFOCONF0_FTFENREQ 12
-#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
-#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
-#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
-#define S6_GMAC_FIFOCONF0_STFENRPLY 19
-#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
-#define S6_GMAC_FIFOCONF1 0x04C
-#define S6_GMAC_FIFOCONF2 0x050
-#define S6_GMAC_FIFOCONF2_CFGLWM 0
-#define S6_GMAC_FIFOCONF2_CFGHWM 16
-#define S6_GMAC_FIFOCONF3 0x054
-#define S6_GMAC_FIFOCONF3_CFGFTTH 0
-#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
-#define S6_GMAC_FIFOCONF4 0x058
-#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
-#define S6_GMAC_FIFOCONF_RSV_RUNT 1
-#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
-#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
-#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
-#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
-#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
-#define S6_GMAC_FIFOCONF_RSV_OK 7
-#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
-#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
-#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
-#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
-#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
-#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
-#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
-#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
-#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
-#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
-#define S6_GMAC_FIFOCONF5 0x05C
-#define S6_GMAC_FIFOCONF5_DROPLT64 18
-#define S6_GMAC_FIFOCONF5_CFGBYTM 19
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
-
-#define S6_GMAC_STAT_REGS 0x080
-#define S6_GMAC_STAT_SIZE_MIN 12
-#define S6_GMAC_STATTR64 0x080
-#define S6_GMAC_STATTR64_SIZE 18
-#define S6_GMAC_STATTR127 0x084
-#define S6_GMAC_STATTR127_SIZE 18
-#define S6_GMAC_STATTR255 0x088
-#define S6_GMAC_STATTR255_SIZE 18
-#define S6_GMAC_STATTR511 0x08C
-#define S6_GMAC_STATTR511_SIZE 18
-#define S6_GMAC_STATTR1K 0x090
-#define S6_GMAC_STATTR1K_SIZE 18
-#define S6_GMAC_STATTRMAX 0x094
-#define S6_GMAC_STATTRMAX_SIZE 18
-#define S6_GMAC_STATTRMGV 0x098
-#define S6_GMAC_STATTRMGV_SIZE 18
-#define S6_GMAC_STATRBYT 0x09C
-#define S6_GMAC_STATRBYT_SIZE 24
-#define S6_GMAC_STATRPKT 0x0A0
-#define S6_GMAC_STATRPKT_SIZE 18
-#define S6_GMAC_STATRFCS 0x0A4
-#define S6_GMAC_STATRFCS_SIZE 12
-#define S6_GMAC_STATRMCA 0x0A8
-#define S6_GMAC_STATRMCA_SIZE 18
-#define S6_GMAC_STATRBCA 0x0AC
-#define S6_GMAC_STATRBCA_SIZE 22
-#define S6_GMAC_STATRXCF 0x0B0
-#define S6_GMAC_STATRXCF_SIZE 18
-#define S6_GMAC_STATRXPF 0x0B4
-#define S6_GMAC_STATRXPF_SIZE 12
-#define S6_GMAC_STATRXUO 0x0B8
-#define S6_GMAC_STATRXUO_SIZE 12
-#define S6_GMAC_STATRALN 0x0BC
-#define S6_GMAC_STATRALN_SIZE 12
-#define S6_GMAC_STATRFLR 0x0C0
-#define S6_GMAC_STATRFLR_SIZE 16
-#define S6_GMAC_STATRCDE 0x0C4
-#define S6_GMAC_STATRCDE_SIZE 12
-#define S6_GMAC_STATRCSE 0x0C8
-#define S6_GMAC_STATRCSE_SIZE 12
-#define S6_GMAC_STATRUND 0x0CC
-#define S6_GMAC_STATRUND_SIZE 12
-#define S6_GMAC_STATROVR 0x0D0
-#define S6_GMAC_STATROVR_SIZE 12
-#define S6_GMAC_STATRFRG 0x0D4
-#define S6_GMAC_STATRFRG_SIZE 12
-#define S6_GMAC_STATRJBR 0x0D8
-#define S6_GMAC_STATRJBR_SIZE 12
-#define S6_GMAC_STATRDRP 0x0DC
-#define S6_GMAC_STATRDRP_SIZE 12
-#define S6_GMAC_STATTBYT 0x0E0
-#define S6_GMAC_STATTBYT_SIZE 24
-#define S6_GMAC_STATTPKT 0x0E4
-#define S6_GMAC_STATTPKT_SIZE 18
-#define S6_GMAC_STATTMCA 0x0E8
-#define S6_GMAC_STATTMCA_SIZE 18
-#define S6_GMAC_STATTBCA 0x0EC
-#define S6_GMAC_STATTBCA_SIZE 18
-#define S6_GMAC_STATTXPF 0x0F0
-#define S6_GMAC_STATTXPF_SIZE 12
-#define S6_GMAC_STATTDFR 0x0F4
-#define S6_GMAC_STATTDFR_SIZE 12
-#define S6_GMAC_STATTEDF 0x0F8
-#define S6_GMAC_STATTEDF_SIZE 12
-#define S6_GMAC_STATTSCL 0x0FC
-#define S6_GMAC_STATTSCL_SIZE 12
-#define S6_GMAC_STATTMCL 0x100
-#define S6_GMAC_STATTMCL_SIZE 12
-#define S6_GMAC_STATTLCL 0x104
-#define S6_GMAC_STATTLCL_SIZE 12
-#define S6_GMAC_STATTXCL 0x108
-#define S6_GMAC_STATTXCL_SIZE 12
-#define S6_GMAC_STATTNCL 0x10C
-#define S6_GMAC_STATTNCL_SIZE 13
-#define S6_GMAC_STATTPFH 0x110
-#define S6_GMAC_STATTPFH_SIZE 12
-#define S6_GMAC_STATTDRP 0x114
-#define S6_GMAC_STATTDRP_SIZE 12
-#define S6_GMAC_STATTJBR 0x118
-#define S6_GMAC_STATTJBR_SIZE 12
-#define S6_GMAC_STATTFCS 0x11C
-#define S6_GMAC_STATTFCS_SIZE 12
-#define S6_GMAC_STATTXCF 0x120
-#define S6_GMAC_STATTXCF_SIZE 12
-#define S6_GMAC_STATTOVR 0x124
-#define S6_GMAC_STATTOVR_SIZE 12
-#define S6_GMAC_STATTUND 0x128
-#define S6_GMAC_STATTUND_SIZE 12
-#define S6_GMAC_STATTFRG 0x12C
-#define S6_GMAC_STATTFRG_SIZE 12
-#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
-#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
-#define S6_GMAC_STATCARRY1_RDRP 0
-#define S6_GMAC_STATCARRY1_RJBR 1
-#define S6_GMAC_STATCARRY1_RFRG 2
-#define S6_GMAC_STATCARRY1_ROVR 3
-#define S6_GMAC_STATCARRY1_RUND 4
-#define S6_GMAC_STATCARRY1_RCSE 5
-#define S6_GMAC_STATCARRY1_RCDE 6
-#define S6_GMAC_STATCARRY1_RFLR 7
-#define S6_GMAC_STATCARRY1_RALN 8
-#define S6_GMAC_STATCARRY1_RXUO 9
-#define S6_GMAC_STATCARRY1_RXPF 10
-#define S6_GMAC_STATCARRY1_RXCF 11
-#define S6_GMAC_STATCARRY1_RBCA 12
-#define S6_GMAC_STATCARRY1_RMCA 13
-#define S6_GMAC_STATCARRY1_RFCS 14
-#define S6_GMAC_STATCARRY1_RPKT 15
-#define S6_GMAC_STATCARRY1_RBYT 16
-#define S6_GMAC_STATCARRY1_TRMGV 25
-#define S6_GMAC_STATCARRY1_TRMAX 26
-#define S6_GMAC_STATCARRY1_TR1K 27
-#define S6_GMAC_STATCARRY1_TR511 28
-#define S6_GMAC_STATCARRY1_TR255 29
-#define S6_GMAC_STATCARRY1_TR127 30
-#define S6_GMAC_STATCARRY1_TR64 31
-#define S6_GMAC_STATCARRY2_TDRP 0
-#define S6_GMAC_STATCARRY2_TPFH 1
-#define S6_GMAC_STATCARRY2_TNCL 2
-#define S6_GMAC_STATCARRY2_TXCL 3
-#define S6_GMAC_STATCARRY2_TLCL 4
-#define S6_GMAC_STATCARRY2_TMCL 5
-#define S6_GMAC_STATCARRY2_TSCL 6
-#define S6_GMAC_STATCARRY2_TEDF 7
-#define S6_GMAC_STATCARRY2_TDFR 8
-#define S6_GMAC_STATCARRY2_TXPF 9
-#define S6_GMAC_STATCARRY2_TBCA 10
-#define S6_GMAC_STATCARRY2_TMCA 11
-#define S6_GMAC_STATCARRY2_TPKT 12
-#define S6_GMAC_STATCARRY2_TBYT 13
-#define S6_GMAC_STATCARRY2_TFRG 14
-#define S6_GMAC_STATCARRY2_TUND 15
-#define S6_GMAC_STATCARRY2_TOVR 16
-#define S6_GMAC_STATCARRY2_TXCF 17
-#define S6_GMAC_STATCARRY2_TFCS 18
-#define S6_GMAC_STATCARRY2_TJBR 19
-
-#define S6_GMAC_HOST_PBLKCTRL 0x140
-#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
-#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
-#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
-#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
-#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
-#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
-#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
-#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
-#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
-#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
-#define S6_GMAC_HOST_INTMASK 0x144
-#define S6_GMAC_HOST_INTSTAT 0x148
-#define S6_GMAC_HOST_INT_TXBURSTOVER 3
-#define S6_GMAC_HOST_INT_TXPREWOVER 4
-#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
-#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
-#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
-#define S6_GMAC_HOST_RXFIFOHWM 0x14C
-#define S6_GMAC_HOST_CTRLFRAMXP 0x150
-#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
-#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
-
-#define S6_GMAC_BURST_PREWR 0x1B0
-#define S6_GMAC_BURST_PREWR_LEN 0
-#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_PREWR_CFE 20
-#define S6_GMAC_BURST_PREWR_PPE 21
-#define S6_GMAC_BURST_PREWR_FCS 22
-#define S6_GMAC_BURST_PREWR_PAD 23
-#define S6_GMAC_BURST_POSTRD 0x1D0
-#define S6_GMAC_BURST_POSTRD_LEN 0
-#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_POSTRD_DROP 20
-
-
-/* data handling */
-
-#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
-#define S6_NUM_RX_SKB 16
-#define S6_MAX_FRLEN 1536
-
-struct s6gmac {
- u32 reg;
- u32 tx_dma;
- u32 rx_dma;
- u32 io;
- u8 tx_chan;
- u8 rx_chan;
- spinlock_t lock;
- u8 tx_skb_i, tx_skb_o;
- u8 rx_skb_i, rx_skb_o;
- struct sk_buff *tx_skb[S6_NUM_TX_SKB];
- struct sk_buff *rx_skb[S6_NUM_RX_SKB];
- unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
- unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
- struct phy_device *phydev;
- struct {
- struct mii_bus *bus;
- int irq[PHY_MAX_ADDR];
- } mii;
- struct {
- unsigned int mbit;
- u8 giga;
- u8 isup;
- u8 full;
- } link;
-};
-
-static void s6gmac_rx_fillfifo(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct sk_buff *skb;
- while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
- (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
- (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
- pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
- s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
- pd->io, (u32)skb->data, S6_MAX_FRLEN);
- }
-}
-
-static void s6gmac_rx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 pfx;
- struct sk_buff *skb;
- while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
- s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
- skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
- pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
- if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
- dev_kfree_skb_irq(skb);
- } else {
- skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
- & S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx(skb);
- }
- }
-}
-
-static void s6gmac_tx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
- s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
- dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- }
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
-}
-
-struct s6gmac_statinf {
- unsigned reg_size : 4; /* 0: unused */
- unsigned reg_off : 6;
- unsigned net_index : 6;
-};
-
-#define S6_STATS_B (8 * sizeof(u32))
-#define S6_STATS_C(b, r, f) [b] = { \
- BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
- BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
- >= (1<<4)) + \
- r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
- BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
- >= ((1<<6)-1)) + \
- (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
- BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
- % sizeof(unsigned long)) + \
- BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
- / sizeof(unsigned long)) >= (1<<6))) + \
- BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
- != sizeof(unsigned long))) + \
- (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
-
-static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
- S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
- S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
-}, {
- S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
- S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
- S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
-} };
-
-static void s6gmac_stats_collect(struct s6gmac *pd,
- const struct s6gmac_statinf *inf)
-{
- int b;
- for (b = 0; b < S6_STATS_B; b++) {
- if (inf[b].reg_size) {
- pd->stats[inf[b].net_index] +=
- readl(pd->reg + S6_GMAC_STAT_REGS
- + sizeof(u32) * inf[b].reg_off);
- }
- }
-}
-
-static void s6gmac_stats_carry(struct s6gmac *pd,
- const struct s6gmac_statinf *inf, u32 mask)
-{
- int b;
- while (mask) {
- b = fls(mask) - 1;
- mask &= ~(1 << b);
- pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
- }
-}
-
-static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
-{
- int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
- ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
- return r;
-}
-
-static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
-{
- u32 mask;
- mask = s6gmac_stats_pending(pd, carry);
- if (mask) {
- writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
- s6gmac_stats_carry(pd, &statinf[carry][0], mask);
- }
-}
-
-static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s6gmac *pd = netdev_priv(dev);
- if (!dev)
- return IRQ_NONE;
- spin_lock(&pd->lock);
- if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
- s6gmac_rx_interrupt(dev);
- s6gmac_rx_fillfifo(dev);
- if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
- s6gmac_tx_interrupt(dev);
- s6gmac_stats_interrupt(pd, 0);
- s6gmac_stats_interrupt(pd, 1);
- spin_unlock(&pd->lock);
- return IRQ_HANDLED;
-}
-
-static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
- u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
-{
- writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
- writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
- writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
- writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
-}
-
-static inline void s6gmac_stop_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- writel(0, pd->reg + S6_GMAC_MACCONF1);
-}
-
-static inline void s6gmac_init_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int is_rgmii = !!(pd->phydev->supported
- & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
-#if 0
- writel(1 << S6_GMAC_MACCONF1_SYNCTX |
- 1 << S6_GMAC_MACCONF1_SYNCRX |
- 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RESTXFUNC |
- 1 << S6_GMAC_MACCONF1_RESRXFUNC |
- 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
- 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
- pd->reg + S6_GMAC_MACCONF1);
-#endif
- writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
- udelay(1000);
- writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
- pd->reg + S6_GMAC_MACCONF1);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(1 << S6_GMAC_MACCONF1_TXENA |
- 1 << S6_GMAC_MACCONF1_RXENA |
- (dev->flags & IFF_LOOPBACK ? 1 : 0)
- << S6_GMAC_MACCONF1_LOOPBACK,
- pd->reg + S6_GMAC_MACCONF1);
- writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
- dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
- pd->reg + S6_GMAC_MACMAXFRAMELEN);
- writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
- 1 << S6_GMAC_MACCONF2_PADCRCENA |
- 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
- (pd->link.giga ?
- S6_GMAC_MACCONF2_IFMODE_BYTE :
- S6_GMAC_MACCONF2_IFMODE_NIBBLE)
- << S6_GMAC_MACCONF2_IFMODE |
- 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
- pd->reg + S6_GMAC_MACCONF2);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
- writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
- 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_STFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
- pd->reg + S6_GMAC_FIFOCONF0);
- writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
- 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
- pd->reg + S6_GMAC_FIFOCONF3);
- writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
- 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_OK |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
- 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
- pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
- 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
- pd->reg + S6_GMAC_FIFOCONF5);
- writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
- pd->reg + S6_GMAC_FIFOCONF4);
- s6gmac_set_dstaddr(pd, 0,
- 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 1,
- dev->dev_addr[5] |
- dev->dev_addr[4] << 8 |
- dev->dev_addr[3] << 16 |
- dev->dev_addr[2] << 24,
- dev->dev_addr[1] |
- dev->dev_addr[0] << 8,
- 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 2,
- 0x00000000, 0x00000100, 0x00000000, 0x00000100);
- s6gmac_set_dstaddr(pd, 3,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
-}
-
-static void s6mii_enable(struct s6gmac *pd)
-{
- writel(readl(pd->reg + S6_GMAC_MACCONF1) &
- ~(1 << S6_GMAC_MACCONF1_SOFTRES),
- pd->reg + S6_GMAC_MACCONF1);
- writel((readl(pd->reg + S6_GMAC_MACMIICONF)
- & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
- | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
- pd->reg + S6_GMAC_MACMIICONF);
-}
-
-static int s6mii_busy(struct s6gmac *pd, int tmo)
-{
- while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
- if (--tmo == 0)
- return -ETIME;
- udelay(64);
- }
- return 0;
-}
-
-static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
- writel(0, pd->reg + S6_GMAC_MACMIICMD);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
-}
-
-static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(value, pd->reg + S6_GMAC_MACMIICTRL);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return 0;
-}
-
-static int s6mii_reset(struct mii_bus *bus)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
- return -ETIME;
- return 0;
-}
-
-static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
-{
- u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
- pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
- switch (pd->link.mbit) {
- case 10:
- pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 100:
- pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 1000:
- pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- default:
- return;
- }
- writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
-}
-
-static inline void s6gmac_linkisup(struct net_device *dev, int isup)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
-
- pd->link.full = phydev->duplex;
- pd->link.giga = (phydev->speed == 1000);
- if (pd->link.mbit != phydev->speed) {
- pd->link.mbit = phydev->speed;
- s6gmac_set_rgmii_txclock(pd);
- }
- pd->link.isup = isup;
- if (isup)
- netif_carrier_on(dev);
- phy_print_status(phydev);
-}
-
-static void s6gmac_adjust_link(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
- if (pd->link.isup &&
- (!phydev->link ||
- (pd->link.mbit != phydev->speed) ||
- (pd->link.full != phydev->duplex))) {
- pd->link.isup = 0;
- netif_tx_disable(dev);
- if (!phydev->link) {
- netif_carrier_off(dev);
- phy_print_status(phydev);
- }
- }
- if (!pd->link.isup && phydev->link) {
- if (pd->link.full != phydev->duplex) {
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- if (phydev->duplex)
- maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
- else
- maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (pd->link.giga != (phydev->speed == 1000)) {
- u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
- << S6_GMAC_MACCONF2_IFMODE);
- if (phydev->speed == 1000) {
- fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
- maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
- << S6_GMAC_MACCONF2_IFMODE;
- } else {
- fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
- maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
- << S6_GMAC_MACCONF2_IFMODE;
- }
- writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
- s6gmac_linkisup(dev, 1);
- }
-}
-
-static inline int s6gmac_phy_start(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int i = 0;
- struct phy_device *p = NULL;
- while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
- i++;
- p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
- PHY_INTERFACE_MODE_RGMII);
- if (IS_ERR(p)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(p);
- }
- p->supported &= PHY_GBIT_FEATURES;
- p->advertising = p->supported;
- pd->phydev = p;
- return 0;
-}
-
-static inline void s6gmac_init_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 mask;
- mask = 1 << S6_GMAC_STATCARRY1_RDRP |
- 1 << S6_GMAC_STATCARRY1_RJBR |
- 1 << S6_GMAC_STATCARRY1_RFRG |
- 1 << S6_GMAC_STATCARRY1_ROVR |
- 1 << S6_GMAC_STATCARRY1_RUND |
- 1 << S6_GMAC_STATCARRY1_RCDE |
- 1 << S6_GMAC_STATCARRY1_RFLR |
- 1 << S6_GMAC_STATCARRY1_RALN |
- 1 << S6_GMAC_STATCARRY1_RMCA |
- 1 << S6_GMAC_STATCARRY1_RFCS |
- 1 << S6_GMAC_STATCARRY1_RPKT |
- 1 << S6_GMAC_STATCARRY1_RBYT;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
- mask = 1 << S6_GMAC_STATCARRY2_TDRP |
- 1 << S6_GMAC_STATCARRY2_TNCL |
- 1 << S6_GMAC_STATCARRY2_TXCL |
- 1 << S6_GMAC_STATCARRY2_TEDF |
- 1 << S6_GMAC_STATCARRY2_TPKT |
- 1 << S6_GMAC_STATCARRY2_TBYT |
- 1 << S6_GMAC_STATCARRY2_TFRG |
- 1 << S6_GMAC_STATCARRY2_TUND |
- 1 << S6_GMAC_STATCARRY2_TOVR |
- 1 << S6_GMAC_STATCARRY2_TFCS |
- 1 << S6_GMAC_STATCARRY2_TJBR;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
-}
-
-static inline void s6gmac_init_dmac(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
- s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
- s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
- s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
-}
-
-static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&pd->lock, flags);
- writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
- 0 << S6_GMAC_BURST_PREWR_CFE |
- 1 << S6_GMAC_BURST_PREWR_PPE |
- 1 << S6_GMAC_BURST_PREWR_FCS |
- ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
- pd->reg + S6_GMAC_BURST_PREWR);
- s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
- (u32)skb->data, pd->io, skb->len);
- if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_stop_queue(dev);
- if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
- printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
- pd->tx_skb_o, pd->tx_skb_i);
- BUG();
- }
- pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static void s6gmac_tx_timeout(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_tx_interrupt(dev);
- spin_unlock_irqrestore(&pd->lock, flags);
-}
-
-static int s6gmac_open(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- phy_read_status(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- pd->link.mbit = 0;
- s6gmac_linkisup(dev, pd->phydev->link);
- s6gmac_init_device(dev);
- s6gmac_init_stats(dev);
- s6gmac_init_dmac(dev);
- s6gmac_rx_fillfifo(dev);
- s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
- 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
- s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
- 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
- writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
- 0 << S6_GMAC_HOST_INT_TXPREWOVER |
- 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
- 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
- 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
- pd->reg + S6_GMAC_HOST_INTMASK);
- spin_unlock_irqrestore(&pd->lock, flags);
- phy_start(pd->phydev);
- netif_start_queue(dev);
- return 0;
-}
-
-static int s6gmac_stop(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- netif_stop_queue(dev);
- phy_stop(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_init_dmac(dev);
- s6gmac_stop_device(dev);
- while (pd->tx_skb_i != pd->tx_skb_o)
- dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- while (pd->rx_skb_i != pd->rx_skb_o)
- dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static struct net_device_stats *s6gmac_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
- int i;
- do {
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
- pd->stats[i] =
- pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
- s6gmac_stats_collect(pd, &statinf[0][0]);
- s6gmac_stats_collect(pd, &statinf[1][0]);
- i = s6gmac_stats_pending(pd, 0) |
- s6gmac_stats_pending(pd, 1);
- spin_unlock_irqrestore(&pd->lock, flags);
- } while (i);
- st->rx_errors = st->rx_crc_errors +
- st->rx_frame_errors +
- st->rx_length_errors +
- st->rx_missed_errors;
- st->tx_errors += st->tx_aborted_errors;
- return st;
-}
-
-static int s6gmac_probe(struct platform_device *pdev)
-{
- struct net_device *dev;
- struct s6gmac *pd;
- int res;
- unsigned long i;
- struct mii_bus *mb;
-
- dev = alloc_etherdev(sizeof(*pd));
- if (!dev)
- return -ENOMEM;
-
- dev->open = s6gmac_open;
- dev->stop = s6gmac_stop;
- dev->hard_start_xmit = s6gmac_tx;
- dev->tx_timeout = s6gmac_tx_timeout;
- dev->watchdog_timeo = HZ;
- dev->get_stats = s6gmac_stats;
- dev->irq = platform_get_irq(pdev, 0);
- pd = netdev_priv(dev);
- memset(pd, 0, sizeof(*pd));
- spin_lock_init(&pd->lock);
- pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
- i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
- pd->tx_dma = DMA_MASK_DMAC(i);
- pd->tx_chan = DMA_INDEX_CHNL(i);
- i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
- pd->rx_dma = DMA_MASK_DMAC(i);
- pd->rx_chan = DMA_INDEX_CHNL(i);
- pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
- res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
- goto errirq;
- }
- res = register_netdev(dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "error registering device %s\n",
- dev->name);
- goto errdev;
- }
- mb = mdiobus_alloc();
- if (!mb) {
- printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
- res = -ENOMEM;
- goto errmii;
- }
- mb->name = "s6gmac_mii";
- mb->read = s6mii_read;
- mb->write = s6mii_write;
- mb->reset = s6mii_reset;
- mb->priv = pd;
- snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
- mb->phy_mask = ~(1 << 0);
- mb->irq = &pd->mii.irq[0];
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- int n = platform_get_irq(pdev, i + 1);
- if (n < 0)
- n = PHY_POLL;
- pd->mii.irq[i] = n;
- }
- mdiobus_register(mb);
- pd->mii.bus = mb;
- res = s6gmac_phy_start(dev);
- if (res)
- return res;
- platform_set_drvdata(pdev, dev);
- return 0;
-errmii:
- unregister_netdev(dev);
-errdev:
- free_irq(dev->irq, dev);
-errirq:
- free_netdev(dev);
- return res;
-}
-
-static int s6gmac_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- if (dev) {
- struct s6gmac *pd = netdev_priv(dev);
- mdiobus_unregister(pd->mii.bus);
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- free_netdev(dev);
- }
- return 0;
-}
-
-static struct platform_driver s6gmac_driver = {
- .probe = s6gmac_probe,
- .remove = s6gmac_remove,
- .driver = {
- .name = "s6gmac",
- },
-};
-
-module_platform_driver(s6gmac_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
-MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
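The deleted driver tracks its TX and RX rings with free-running u8 producer/consumer indices (tx_skb_i/tx_skb_o and rx_skb_i/rx_skb_o above): the counters are never reset, and the unsigned-byte difference (u8)(in - out) equals the number of in-flight buffers even across wraparound, as long as the ring has at most 256 slots. A minimal standalone sketch of that idiom, with illustrative names that are not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16	/* any power of two up to 256 works with u8 indices */

int main(void)
{
	uint8_t in = 250, out = 250;	/* start near the wrap point */
	int i;

	for (i = 0; i < 10; i++)
		in++;			/* produce 10 entries; 'in' wraps past 255 */
	for (i = 0; i < 4; i++)
		out++;			/* consume 4 of them */

	/* prints 6: the count survives the wraparound of 'in' */
	printf("in flight: %u\n", (unsigned)(uint8_t)(in - out));
	/* the slot index is the counter reduced modulo the ring size */
	printf("next slot: %u\n", (unsigned)(in % RING_SIZE));
	return 0;
}

This is the same arithmetic s6gmac_tx() uses to detect ring overflow and s6gmac_rx_fillfifo() uses to stop refilling once S6_NUM_RX_SKB buffers are outstanding.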
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 118a427d1942..8c6b7c1651e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1671,7 +1671,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev)
+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
@@ -1708,9 +1708,11 @@ static int stmmac_hw_setup(struct net_device *dev)
stmmac_mmc_setup(priv);
- ret = stmmac_init_ptp(priv);
- if (ret && ret != -EOPNOTSUPP)
- pr_warn("%s: failed PTP initialisation\n", __func__);
+ if (init_ptp) {
+ ret = stmmac_init_ptp(priv);
+ if (ret && ret != -EOPNOTSUPP)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+ }
#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
@@ -1787,7 +1789,7 @@ static int stmmac_open(struct net_device *dev)
goto init_error;
}
- ret = stmmac_hw_setup(dev);
+ ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
@@ -3036,7 +3038,7 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
init_dma_desc_rings(ndev, GFP_ATOMIC);
- stmmac_hw_setup(ndev);
+ stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);
napi_enable(&priv->napi);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4032b170fe24..3039de2465ba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -430,7 +430,6 @@ static struct platform_driver stmmac_pltfr_driver = {
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
.of_match_table = of_match_ptr(stmmac_dt_ids),
},
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 45c408ef67d0..d2835bf7b4fb 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1201,6 +1201,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
if (IS_ERR(segs)) {
dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c560f9aeb55d..e61ee8351272 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -757,6 +757,14 @@ requeue:
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ int value = irq - priv->irqs_table[0];
+
+ /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
+ * is to make sure we will always write the correct value to the EOI
+ * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
+ * for TX Interrupt and 3 for MISC Interrupt.
+ */
+ cpdma_ctlr_eoi(priv->dma, value);
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
@@ -786,8 +794,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- if (num_tx)
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
@@ -795,7 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
prim_cpsw->irq_enabled = true;
@@ -1310,8 +1315,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1581,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1620,9 +1620,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
#endif
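The comment added to cpsw_interrupt() assumes the four CPSW interrupt lines are requested in the order RX_THRESH, RX, TX, MISC with consecutive IRQ numbers, so subtracting the first entry of the IRQ table from the IRQ that fired yields the code to write to the CPDMA EOI register. A small sketch of that mapping under the same assumption; the names here are illustrative, not the driver's:

#include <stdio.h>

enum cpsw_eoi { EOI_RX_THRESH = 0, EOI_RX = 1, EOI_TX = 2, EOI_MISC = 3 };

static int irqs_table[4];	/* filled in request order at probe time */

static enum cpsw_eoi eoi_code(int irq)
{
	/* same arithmetic as 'irq - priv->irqs_table[0]' in the patch */
	return (enum cpsw_eoi)(irq - irqs_table[0]);
}

int main(void)
{
	int base = 40, i;

	for (i = 0; i < 4; i++)
		irqs_table[i] = base + i;	/* consecutive IRQ numbers */

	/* the TX line is the third one requested, so it maps to EOI code 2 */
	printf("irq %d -> EOI %d\n", base + 2, eoi_code(base + 2));
	return 0;
}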
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9c2d91ea0af4..dbcbf0c5bcfa 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map temac regs.\n");
+ rc = -ENOMEM;
goto nodev;
}
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ rc = -ENODEV;
goto err_iounmap;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 44b8d2bad8c3..4c9b4fa1d3c1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -388,7 +388,6 @@ struct axidma_bd {
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
- * @temac_type: axienet type to identify between soft and hard temac
* @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier
@@ -431,7 +430,6 @@ struct axienet_local {
int tx_irq;
int rx_irq;
- u32 temac_type;
u32 phy_type;
u32 options; /* Current options word */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4ea2d4e6f1d1..a6d2860b712c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+ ret = -ENOMEM;
goto nodev;
}
/* Setup checksum offload, but default to off if not specified */
@@ -1555,10 +1556,6 @@ static int axienet_of_probe(struct platform_device *op)
if ((be32_to_cpup(p)) >= 0x4000)
lp->jumbo_support = 1;
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
- NULL);
- if (p)
- lp->temac_type = be32_to_cpup(p);
p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
if (p)
lp->phy_type = be32_to_cpup(p);
@@ -1567,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ ret = -ENODEV;
goto err_iounmap;
}
lp->dma_regs = of_iomap(np, 0);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 24858799c204..9d4ce388510a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "no IRQ found\n");
+ rc = -ENXIO;
goto error;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2f48f790c9b4..384ca4f4de4a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -590,6 +590,7 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
+#define NETVSC_SEND_BUFFER_ID 0
#define NETVSC_PACKET_SIZE 4096
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index dd867e6cabd6..9f49c0129a78 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -161,8 +161,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
- * SendsendBufferComplete msg (ie sent
- * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
+ * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
+ * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
* to send a revoke msg here
*/
if (net_device->send_section_size) {
@@ -172,7 +172,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
revoke_packet->hdr.msg_type =
NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
- revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+ revoke_packet->msg.v1_msg.revoke_send_buf.id =
+ NETVSC_SEND_BUFFER_ID;
ret = vmbus_sendpacket(net_device->dev->channel,
revoke_packet,
@@ -204,7 +205,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
net_device->send_buf_gpadl_handle = 0;
}
if (net_device->send_buf) {
- /* Free up the receive buffer */
+ /* Free up the send buffer */
vfree(net_device->send_buf);
net_device->send_buf = NULL;
}
@@ -339,9 +340,9 @@ static int netvsc_init_buf(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
- init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+ init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
net_device->send_buf_gpadl_handle;
- init_packet->msg.v1_msg.send_recv_buf.id = 0;
+ init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
/* Send the gpadl notification request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -364,7 +365,7 @@ static int netvsc_init_buf(struct hv_device *device)
netdev_err(ndev, "Unable to complete send buffer "
"initialization with NetVsp - status %d\n",
init_packet->msg.v1_msg.
- send_recv_buf_complete.status);
+ send_send_buf_complete.status);
ret = -EINVAL;
goto cleanup;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c530de1e63f5..3ad8ca76196d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -88,6 +88,7 @@ struct kszphy_priv {
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_broadcast_disable = true,
.has_rmii_ref_clk_sel = true,
};
@@ -258,19 +259,6 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
-static int ksz8021_config_init(struct phy_device *phydev)
-{
- int rc;
-
- rc = kszphy_config_init(phydev);
- if (rc)
- return rc;
-
- rc = kszphy_broadcast_disable(phydev);
-
- return rc < 0 ? rc : 0;
-}
-
static int ksz9021_load_values_from_of(struct phy_device *phydev,
struct device_node *of_node, u16 reg,
char *field1, char *field2,
@@ -584,7 +572,7 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -601,7 +589,7 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b8a82b86f909..602dc6668c3a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
/* default ethernet address used by the modem */
static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
/* Make up an ethernet header if the packet doesn't have one.
*
* A firmware bug common among several devices cause them to send raw
@@ -332,10 +334,12 @@ next_desc:
usb_driver_release_interface(driver, info->data);
}
- /* Never use the same address on both ends of the link, even
- * if the buggy firmware told us to.
+ /* Never use the same address on both ends of the link, even if the
+ * buggy firmware told us to. Or, if the device is assigned the well-known
+ * buggy firmware MAC address, replace it with a random address.
*/
- if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+ if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+ ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
eth_hw_addr_random(dev->net);
/* make MAC addr easily distinguishable from an IP header */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2d1c77e81836..57ec23e8ccfa 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1897,6 +1897,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
netif_wake_queue(netdev);
}
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+ int offset = skb_transport_offset(skb);
+
+ if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+ features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+ else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+ features &= ~NETIF_F_GSO_MASK;
+
+ return features;
+}
+
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -3706,6 +3722,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_set_mac_address = rtl8152_set_mac_address,
.ndo_change_mtu = rtl8152_change_mtu,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_features_check = rtl8152_features_check,
};
static void r8152b_get_version(struct r8152 *tp)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8bd7191572d..5ca97713bfb3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -760,7 +760,6 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
unsigned int r, received = 0;
-again:
received += virtnet_receive(rq, budget - received);
/* Out of packets? */
@@ -771,7 +770,6 @@ again:
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
- goto again;
}
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 49d9f2291998..7fbd89fbe107 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1579,8 +1579,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb))
- return -EINVAL;
+ if (IS_ERR(skb)) {
+ err = -EINVAL;
+ goto err;
+ }
skb_scrub_packet(skb, xnet);
@@ -1590,12 +1592,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
- return err;
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto err;
+ }
skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb))
- return -ENOMEM;
+ if (WARN_ON(!skb)) {
+ err = -ENOMEM;
+ goto err;
+ }
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1606,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
ttl, src_port, dst_port);
return 0;
+err:
+ dst_release(dst);
+ return err;
}
#endif
@@ -1621,7 +1630,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
skb = udp_tunnel_handle_offloads(skb, udp_sum);
if (IS_ERR(skb))
- return -EINVAL;
+ return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1629,8 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
+ }
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
@@ -1776,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
tos, ttl, df, src_port, dst_port,
htonl(vni << 8),
!net_eq(vxlan->net, dev_net(vxlan->dev)));
-
- if (err < 0)
+ if (err < 0) {
+ /* skb is already freed. */
+ skb = NULL;
goto rt_tx_error;
+ }
+
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
} else {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3c06e9365949..9880dae2a569 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -1070,7 +1070,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
*/
if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
- (sdiodev->pdata->oob_irq_supported)))
+ (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
bus_if->wowl_supported = true;
#endif
@@ -1167,7 +1167,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(SDIO, "Enter\n");
- if (sdiodev->pdata->oob_irq_supported)
+ if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
disable_irq_wake(sdiodev->pdata->oob_irq_nr);
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 91c0cb3c368e..21de4fe6cf2d 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -65,7 +65,8 @@ config IPW2100_DEBUG
config IPW2200
tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
- depends on PCI && CFG80211 && CFG80211_WEXT
+ depends on PCI && CFG80211
+ select CFG80211_WEXT
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 38de1513e4de..850b85a47806 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1323,10 +1323,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
try_again:
/* try next, if any */
- kfree(pieces);
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
+ kfree(pieces);
return;
out_free_fw:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 9564ae173d06..1f7f15eb86da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -310,6 +310,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+#define FH_MEM_TB_MAX_LENGTH (0x00020000)
/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 31a5b3f4266c..e880f9d4717b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1004,8 +1004,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
- /* disallow low power states when the FW is down */
- iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ /*
+ * Disallow low power states when the FW is down by taking
+ * the UCODE_DOWN ref. in case of ongoing hw restart the
+ * ref is already taken, so don't take it again.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
/* async_handlers_wk is now blocked */
@@ -1023,6 +1028,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ /*
+ * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+ * won't be called in this case).
+ */
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
mvm->ucode_loaded = false;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3ee8e3848876..2f0c4b170344 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d79a1f44b8e..523fe0c88dcb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -614,7 +614,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
{
u8 *v_addr;
dma_addr_t p_addr;
- u32 offset, chunk_sz = section->len;
+ u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
int ret = 0;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
@@ -1012,16 +1012,21 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans);
- /* Upon stop, the APM issues an interrupt if HW RF kill is set.
- * Clean again the interrupt here
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ udelay(20);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending HW RF kill interrupt all
+ * the time, unless the interrupt is ACKed even if the interrupt
+ * should be masked. Re-ACK all the interrupts here.
*/
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
- /* stop and reset the on-board processor */
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- udelay(20);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
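Capping chunk_sz at FH_MEM_TB_MAX_LENGTH means a firmware section larger than one 128 KiB transfer buffer is now copied to the device in several bounded pieces instead of a single oversized transfer. A plain-C sketch of that chunking loop, assuming each memcpy() stands in for one bounded DMA write (the real code goes through a DMA-coherent bounce buffer and checks for errors on every piece):

#include <stddef.h>
#include <string.h>

#define FH_MEM_TB_MAX_LENGTH 0x00020000	/* 128 KiB per transfer buffer */

static void load_section(unsigned char *dst, const unsigned char *src, size_t len)
{
	size_t offset, chunk;

	for (offset = 0; offset < len; offset += chunk) {
		chunk = len - offset;
		if (chunk > FH_MEM_TB_MAX_LENGTH)
			chunk = FH_MEM_TB_MAX_LENGTH;
		/* one bounded transfer per iteration */
		memcpy(dst + offset, src + offset, chunk);
	}
}

int main(void)
{
	/* a section a bit larger than three chunks exercises the cap */
	static unsigned char fw[3 * FH_MEM_TB_MAX_LENGTH + 123];
	static unsigned char dev[sizeof(fw)];

	load_section(dev, fw, sizeof(fw));
	return memcmp(dev, fw, sizeof(fw));	/* 0 on success */
}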
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index efbaf2ae1999..794204e34fba 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
}
queue->remaining_credit = credit_bytes;
+ queue->credit_usec = credit_usec;
err = connect_rings(be, queue);
if (err) {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index ba74f0aa60c7..3c22dbebc80f 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -89,6 +89,7 @@ struct rockchip_iomux {
* @reg_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
* @irq: interrupt of the gpio bank
+ * @saved_enables: Saved content of GPIO_INTEN at suspend time.
* @pin_base: first pin number
* @nr_pins: number of pins in this bank
* @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
struct regmap *regmap_pull;
struct clk *clk;
int irq;
+ u32 saved_enables;
u32 pin_base;
u8 nr_pins;
char *name;
@@ -1543,6 +1545,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
+ irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val &= ~d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val |= d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
static int rockchip_interrupts_register(struct platform_device *pdev,
struct rockchip_pinctrl *info)
{
@@ -1581,12 +1628,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc = irq_get_domain_generic_chip(bank->domain, 0);
gc->reg_base = bank->reg_base;
gc->private = bank;
- gc->chip_types[0].regs.mask = GPIO_INTEN;
+ gc->chip_types[0].regs.mask = GPIO_INTMASK;
gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+ gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+ gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+ gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 7c9d51382248..9e5ec00084bb 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
unsigned long config;
- st_pinconf_get(pctldev, pin_id, &config);
+ mutex_unlock(&pctldev->mutex);
+ st_pinconf_get(pctldev, pin_id, &config);
+ mutex_lock(&pctldev->mutex);
seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
"de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
static struct irq_chip st_gpio_irqchip = {
.name = "GPIO",
+ .irq_disable = st_gpio_irq_mask,
.irq_mask = st_gpio_irq_mask,
.irq_unmask = st_gpio_irq_unmask,
.irq_set_type = st_gpio_irq_set_type,
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index c71443c4f265..97b5e4ee1ca4 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+ RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
{}
};
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index c1444c3d84c2..2809ae0d6bcd 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -570,7 +570,7 @@ static struct regulator_ops s2mps14_reg_ops = {
.enable_mask = S2MPS14_ENABLE_MASK \
}
-#define regulator_desc_s2mps14_buck(num, min, step) { \
+#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS14_BUCK##num, \
.ops = &s2mps14_reg_ops, \
@@ -579,7 +579,7 @@ static struct regulator_ops s2mps14_reg_ops = {
.min_uV = min, \
.uV_step = step, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
- .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
+ .linear_min_sel = min_sel, \
.ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
@@ -613,11 +613,16 @@ static const struct regulator_desc s2mps14_regulators[] = {
regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
- regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
- regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
+ S2MPS14_BUCK4_START_SEL),
+ regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
};
static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3b73b96619e2..26270c351624 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.16"
+#define DRV_VERSION "1.6.0.17"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2097de42a147..155b286f1a9d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
goto fnic_abort_cmd_end;
}
+ /* IO out of order */
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Issuing Host reset due to out of order IO\n");
+
+ if (fnic_host_reset(sc) == FAILED) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_host_reset failed.\n");
+ }
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
/*
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ca291c1380..cce1cbc1a927 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
* Return target busy if we've received a non-zero retry_delay_timer
* in a FCP_RSP.
*/
- if (time_after(jiffies, fcport->retry_delay_timestamp))
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0;
else
goto qc24_target_busy;
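The explicit zero check matters because retry_delay_timestamp == 0 means no retry delay was ever requested, while time_after() is a wraparound-safe signed comparison: time_after(jiffies, 0) is false for roughly half of the jiffies range, so without the check a command could be held target-busy even though no delay was set. A standalone illustration; time_after is restated below with the same signed-difference definition the kernel uses (typechecks omitted):

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long half = ~0UL >> 1;		/* top bit clear, all others set */
	unsigned long jiffies = half + 2;	/* counter sitting in the upper half */

	/* a timestamp genuinely in the recent past is reported as expired */
	printf("%d\n", time_after(jiffies, half + 1));	/* prints 1 */

	/* an unset timestamp of 0 is reported as "not yet expired", which is
	 * why the pre-fix code could wrongly take the target-busy branch */
	printf("%d\n", time_after(jiffies, 0UL));	/* prints 0 */
	return 0;
}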
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e42fff6e8c10..8afb01604d51 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1041,7 +1041,7 @@ retry:
}
/* signal not to enter either branch of the if () below */
timeleft = 0;
- rtn = NEEDS_RETRY;
+ rtn = FAILED;
} else {
timeleft = wait_for_completion_timeout(&done, timeout);
rtn = SUCCESS;
@@ -1081,7 +1081,7 @@ retry:
rtn = FAILED;
break;
}
- } else if (!rtn) {
+ } else if (rtn != FAILED) {
scsi_abort_eh_cmnd(scmd);
rtn = FAILED;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fedab3c21ddf..399516925d80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sd_config_discard(sdkp, SD_LBP_WS16);
} else { /* LBP VPD page tells us what to use */
-
- if (sdkp->lbpws)
+ if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index b410499cddca..aad6683db81b 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -341,7 +341,7 @@ static int img_spfi_start_dma(struct spi_master *master,
default:
rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
rxconf.src_addr_width = 1;
- rxconf.src_maxburst = 1;
+ rxconf.src_maxburst = 4;
}
dmaengine_slave_config(spfi->rx_ch, &rxconf);
@@ -368,7 +368,7 @@ static int img_spfi_start_dma(struct spi_master *master,
default:
txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
txconf.dst_addr_width = 1;
- txconf.dst_maxburst = 1;
+ txconf.dst_maxburst = 4;
break;
}
dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -390,14 +390,14 @@ static int img_spfi_start_dma(struct spi_master *master,
dma_async_issue_pending(spfi->rx_ch);
}
+ spfi_start(spfi);
+
if (xfer->tx_buf) {
spfi->tx_dma_busy = true;
dmaengine_submit(txdesc);
dma_async_issue_pending(spfi->tx_ch);
}
- spfi_start(spfi);
-
return 1;
stop_dma:
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 239be7cbe5a8..96a5fc0878d8 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ pm_runtime_get_sync(&p->pdev->dev);
+
if (!np) {
/*
* Use spi->controller_data for CS (same strategy as spi_gpio),
@@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
if (spi->cs_gpio >= 0)
gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ pm_runtime_put_sync(&p->pdev->dev);
+
return 0;
}
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 1bf891bd321a..4f361b77c749 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -264,7 +264,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
inode->i_sb->s_root != NULL &&
- is_root_inode(inode))
+ !is_root_inode(inode))
ll_invalidate_aliases(inode);
iput(inode);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index ad09e51ffae4..f65f0d109fc8 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -4,6 +4,8 @@
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
+ * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
+ *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,6 +30,20 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
+/*
+ * Cooling state <-> CPUFreq frequency
+ *
+ * Cooling states are translated to frequencies throughout this driver and this
+ * is the relation between them.
+ *
+ * Highest cooling state corresponds to lowest possible frequency.
+ *
+ * i.e.
+ * level 0 --> 1st Max Freq
+ * level 1 --> 2nd Max Freq
+ * ...
+ */
+
/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
@@ -38,25 +54,27 @@
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* frequency.
+ * @max_level: maximum cooling level. One less than total number of valid
+ * cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @node: list_head to link all cpufreq_cooling_device together.
*
- * This structure is required for keeping information of each
- * cpufreq_cooling_device registered. In order to prevent corruption of this a
- * mutex lock cooling_cpufreq_lock is used.
+ * This structure is required for keeping information of each registered
+ * cpufreq_cooling_device.
*/
struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
unsigned int cpufreq_val;
+ unsigned int max_level;
+ unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
struct list_head node;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
-static unsigned int cpufreq_dev_count;
-
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -98,120 +116,30 @@ static void release_idr(struct idr *idr, int id)
/* Below code defines functions to be used for cpufreq as cooling device */
/**
- * is_cpufreq_valid - function to check frequency transitioning capability.
- * @cpu: cpu for which check is needed.
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @freq: Frequency
*
- * This function will check the current state of the system if
- * it is capable of changing the frequency for a given @cpu.
- *
- * Return: 0 if the system is not currently capable of changing
- * the frequency of given cpu. !0 in case the frequency is changeable.
+ * Return: level on success, THERMAL_CSTATE_INVALID on error.
*/
-static int is_cpufreq_valid(int cpu)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+ unsigned int freq)
{
- struct cpufreq_policy policy;
-
- return !cpufreq_get_policy(&policy, cpu);
-}
-
-enum cpufreq_cooling_property {
- GET_LEVEL,
- GET_FREQ,
- GET_MAXL,
-};
-
-/**
- * get_property - fetch a property of interest for a give cpu.
- * @cpu: cpu for which the property is required
- * @input: query parameter
- * @output: query return
- * @property: type of query (frequency, level, max level)
- *
- * This is the common function to
- * 1. get maximum cpu cooling states
- * 2. translate frequency to cooling state
- * 3. translate cooling state to frequency
- * Note that the code may be not in good shape
- * but it is written in this way in order to:
- * a) reduce duplicate code as most of the code can be shared.
- * b) make sure the logic is consistent when translating between
- * cooling states and frequencies.
- *
- * Return: 0 on success, -EINVAL when invalid parameters are passed.
- */
-static int get_property(unsigned int cpu, unsigned long input,
- unsigned int *output,
- enum cpufreq_cooling_property property)
-{
- int i;
- unsigned long max_level = 0, level = 0;
- unsigned int freq = CPUFREQ_ENTRY_INVALID;
- int descend = -1;
- struct cpufreq_frequency_table *pos, *table =
- cpufreq_frequency_get_table(cpu);
-
- if (!output)
- return -EINVAL;
-
- if (!table)
- return -EINVAL;
-
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* get the frequency order */
- if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
- descend = freq > pos->frequency;
-
- freq = pos->frequency;
- max_level++;
- }
-
- /* No valid cpu frequency entry */
- if (max_level == 0)
- return -EINVAL;
+ unsigned long level;
- /* max_level is an index, not a counter */
- max_level--;
+ for (level = 0; level <= cpufreq_dev->max_level; level++) {
+ if (freq == cpufreq_dev->freq_table[level])
+ return level;
- /* get max level */
- if (property == GET_MAXL) {
- *output = (unsigned int)max_level;
- return 0;
+ if (freq > cpufreq_dev->freq_table[level])
+ break;
}
- if (property == GET_FREQ)
- level = descend ? input : (max_level - input);
-
- i = 0;
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* now we have a valid frequency entry */
- freq = pos->frequency;
-
- if (property == GET_LEVEL && (unsigned int)input == freq) {
- /* get level by frequency */
- *output = descend ? i : (max_level - i);
- return 0;
- }
- if (property == GET_FREQ && level == i) {
- /* get frequency by level */
- *output = freq;
- return 0;
- }
- i++;
- }
-
- return -EINVAL;
+ return THERMAL_CSTATE_INVALID;
}
/**
- * cpufreq_cooling_get_level - for a give cpu, return the cooling level.
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
* @cpu: cpu for which the level is required
* @freq: the frequency of interest
*
@@ -223,77 +151,21 @@ static int get_property(unsigned int cpu, unsigned long input,
*/
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
- unsigned int val;
-
- if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
- return THERMAL_CSTATE_INVALID;
-
- return (unsigned long)val;
-}
-EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
-
-/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: cooling level
- *
- * This function matches cooling level with frequency. Based on a cooling level
- * of frequency, equals cooling state of cpu cooling device, it will return
- * the corresponding frequency.
- * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
- *
- * Return: 0 on error, the corresponding frequency otherwise.
- */
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
-{
- int ret = 0;
- unsigned int freq;
-
- ret = get_property(cpu, level, &freq, GET_FREQ);
- if (ret)
- return 0;
-
- return freq;
-}
-
-/**
- * cpufreq_apply_cooling - function to apply frequency clipping.
- * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
- * clipping data.
- * @cooling_state: value of the cooling state.
- *
- * Function used to make sure the cpufreq layer is aware of current thermal
- * limits. The limits are applied by updating the cpufreq policy.
- *
- * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
- * cooling state).
- */
-static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
- unsigned long cooling_state)
-{
- unsigned int cpuid, clip_freq;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu = cpumask_any(mask);
-
-
- /* Check if the old cooling action is same as new cooling action */
- if (cpufreq_device->cpufreq_state == cooling_state)
- return 0;
-
- clip_freq = get_cpu_frequency(cpu, cooling_state);
- if (!clip_freq)
- return -EINVAL;
-
- cpufreq_device->cpufreq_state = cooling_state;
- cpufreq_device->cpufreq_val = clip_freq;
+ struct cpufreq_cooling_device *cpufreq_dev;
- for_each_cpu(cpuid, mask) {
- if (is_cpufreq_valid(cpuid))
- cpufreq_update_policy(cpuid);
+ mutex_lock(&cooling_cpufreq_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+ mutex_unlock(&cooling_cpufreq_lock);
+ return get_level(cpufreq_dev, freq);
+ }
}
+ mutex_unlock(&cooling_cpufreq_lock);
- return 0;
+ pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
+ return THERMAL_CSTATE_INVALID;
}
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
/**
* cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
@@ -323,11 +195,6 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
&cpufreq_dev->allowed_cpus))
continue;
- if (!cpufreq_dev->cpufreq_val)
- cpufreq_dev->cpufreq_val = get_cpu_frequency(
- cpumask_any(&cpufreq_dev->allowed_cpus),
- cpufreq_dev->cpufreq_state);
-
max_freq = cpufreq_dev->cpufreq_val;
if (policy->max != max_freq)
@@ -354,19 +221,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu;
- unsigned int count = 0;
- int ret;
-
- cpu = cpumask_any(mask);
-
- ret = get_property(cpu, 0, &count, GET_MAXL);
- if (count > 0)
- *state = count;
-
- return ret;
+ *state = cpufreq_device->max_level;
+ return 0;
}
/**
@@ -403,8 +260,24 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+ unsigned int clip_freq;
+
+ /* Request state should be less than max_level */
+ if (WARN_ON(state > cpufreq_device->max_level))
+ return -EINVAL;
+
+ /* Check if the old cooling action is same as new cooling action */
+ if (cpufreq_device->cpufreq_state == state)
+ return 0;
- return cpufreq_apply_cooling(cpufreq_device, state);
+ clip_freq = cpufreq_device->freq_table[state];
+ cpufreq_device->cpufreq_state = state;
+ cpufreq_device->cpufreq_val = clip_freq;
+
+ cpufreq_update_policy(cpu);
+
+ return 0;
}
/* Bind cpufreq callbacks to thermal cooling device ops */
@@ -419,10 +292,25 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
.notifier_call = cpufreq_thermal_notifier,
};
+static unsigned int find_next_max(struct cpufreq_frequency_table *table,
+ unsigned int prev_max)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int max = 0;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (pos->frequency > max && pos->frequency < prev_max)
+ max = pos->frequency;
+ }
+
+ return max;
+}
+
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 + * Normally this should be the same as cpufreq policy->related_cpus.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -437,37 +325,42 @@ __cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
- struct cpufreq_cooling_device *cpufreq_dev = NULL;
- unsigned int min = 0, max = 0;
+ struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
- int ret = 0, i;
- struct cpufreq_policy policy;
+ struct cpufreq_frequency_table *pos, *table;
+ unsigned int freq, i;
+ int ret;
- /* Verify that all the clip cpus have same freq_min, freq_max limit */
- for_each_cpu(i, clip_cpus) {
- /* continue if cpufreq policy not found and not return error */
- if (!cpufreq_get_policy(&policy, i))
- continue;
- if (min == 0 && max == 0) {
- min = policy.cpuinfo.min_freq;
- max = policy.cpuinfo.max_freq;
- } else {
- if (min != policy.cpuinfo.min_freq ||
- max != policy.cpuinfo.max_freq)
- return ERR_PTR(-EINVAL);
- }
+ table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+ if (!table) {
+ pr_debug("%s: CPUFreq table not found\n", __func__);
+ return ERR_PTR(-EPROBE_DEFER);
}
- cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
- GFP_KERNEL);
+
+ cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
+ /* Find max levels */
+ cpufreq_for_each_valid_entry(pos, table)
+ cpufreq_dev->max_level++;
+
+ cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+ cpufreq_dev->max_level, GFP_KERNEL);
+ if (!cpufreq_dev->freq_table) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_cdev;
+ }
+
+ /* max_level is an index, not a counter */
+ cpufreq_dev->max_level--;
+
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
- kfree(cpufreq_dev);
- return ERR_PTR(-EINVAL);
+ cool_dev = ERR_PTR(ret);
+ goto free_table;
}
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,25 +368,44 @@ __cpufreq_cooling_register(struct device_node *np,
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
&cpufreq_cooling_ops);
- if (IS_ERR(cool_dev)) {
- release_idr(&cpufreq_idr, cpufreq_dev->id);
- kfree(cpufreq_dev);
- return cool_dev;
+ if (IS_ERR(cool_dev))
+ goto remove_idr;
+
+ /* Fill freq-table in descending order of frequencies */
+ for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+ freq = find_next_max(table, freq);
+ cpufreq_dev->freq_table[i] = freq;
+
+ /* Warn for duplicate entries */
+ if (!freq)
+ pr_warn("%s: table has duplicate entries\n", __func__);
+ else
+ pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
+
+ cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
- cpufreq_dev->cpufreq_state = 0;
+
mutex_lock(&cooling_cpufreq_lock);
/* Register the notifier for first cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- cpufreq_dev_count++;
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
+
+remove_idr:
+ release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_table:
+ kfree(cpufreq_dev->freq_table);
+free_cdev:
+ kfree(cpufreq_dev);
+
+ return cool_dev;
}
/**
@@ -554,16 +466,16 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_cpufreq_lock);
list_del(&cpufreq_dev->node);
- cpufreq_dev_count--;
/* Unregister the notifier for the last cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
+ kfree(cpufreq_dev->freq_table);
kfree(cpufreq_dev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
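To make the level/frequency mapping above concrete, here is a minimal userspace sketch of the two helpers the rework introduces: find_next_max() returns the largest table frequency strictly below the previous maximum, which lets the registration loop fill freq_table[] in descending order, and get_level() then maps a frequency back to its cooling level by index. The table contents and the INVALID_LEVEL marker are stand-ins, not driver data.

```c
#include <stdio.h>

#define INVALID_LEVEL (~0u)

static unsigned int find_next_max(const unsigned int *table, int n,
				  unsigned int prev_max)
{
	unsigned int max = 0;
	int i;

	for (i = 0; i < n; i++)
		if (table[i] > max && table[i] < prev_max)
			max = table[i];
	return max;
}

static unsigned int get_level(const unsigned int *freq_table,
			      unsigned int max_level, unsigned int freq)
{
	unsigned int level;

	for (level = 0; level <= max_level; level++) {
		if (freq == freq_table[level])
			return level;
		if (freq > freq_table[level])	/* table is descending */
			break;
	}
	return INVALID_LEVEL;
}

int main(void)
{
	/* Unsorted OPPs in kHz, as a cpufreq table might list them. */
	unsigned int table[] = { 600000, 1200000, 800000, 1000000 };
	unsigned int freq_table[4];
	unsigned int i, freq = (unsigned int)-1;

	/* Fill freq_table[] in descending order, as the registration loop does. */
	for (i = 0; i < 4; i++) {
		freq = find_next_max(table, 4, freq);
		freq_table[i] = freq;
	}

	/* level 0 -> 1200000, level 1 -> 1000000, ... per the comment block above */
	printf("level of 800000 kHz: %u\n", get_level(freq_table, 3, 800000));
	return 0;
}
```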
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 000d53e934a0..607b62c7e611 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -18,7 +18,6 @@
*/
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -28,18 +27,17 @@
static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
- struct cpumask mask_val;
-
- /* make sure cpufreq driver has been initialized */
- if (!cpufreq_frequency_get_table(0))
- return -EPROBE_DEFER;
-
- cpumask_set_cpu(0, &mask_val);
- cdev = cpufreq_cooling_register(&mask_val);
+ cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
- return PTR_ERR(cdev);
+ int ret = PTR_ERR(cdev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register cooling device %d\n",
+ ret);
+
+ return ret;
}
platform_set_drvdata(pdev, cdev);
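A small sketch of the error-reporting convention the cooling changes in this patch adopt: -EPROBE_DEFER is an expected, retried condition, so only other errors are logged. EPROBE_DEFER is a kernel-internal errno value, defined locally here so the sketch compiles in userspace.

```c
#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-internal errno, defined here for the sketch */

static int report_cooling_error(int err)
{
	if (err && err != -EPROBE_DEFER)
		fprintf(stderr, "Failed to register cooling device: %d\n", err);
	return err;		/* deferral is silent; the probe will be retried */
}

int main(void)
{
	report_cooling_error(-EPROBE_DEFER);	/* no output */
	report_cooling_error(-EINVAL);		/* logged */
	return 0;
}
```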
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 88b32f942dcf..c1188ac053c9 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -454,15 +453,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
const struct of_device_id *of_id =
of_match_device(of_imx_thermal_match, &pdev->dev);
struct imx_thermal_data *data;
- struct cpumask clip_cpus;
struct regmap *map;
int measure_freq;
int ret;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(&pdev->dev, "no cpufreq driver!");
- return -EPROBE_DEFER;
- }
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -516,12 +510,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
- cpumask_set_cpu(0, &clip_cpus);
- data->cdev = cpufreq_cooling_register(&clip_cpus);
+ data->cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
- dev_err(&pdev->dev,
- "failed to register cpufreq cooling device: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ffe40bffaf1a..d4413698a85f 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index e4e61b3fb11e..231cabc16e16 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -82,7 +82,7 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
if (!acpi_has_method(handle, "_TRT"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -167,7 +167,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
if (!acpi_has_method(handle, "_ART"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -321,8 +321,8 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
unsigned long length = 0;
int count = 0;
char __user *arg = (void __user *)__arg;
- struct trt *trts;
- struct art *arts;
+ struct trt *trts = NULL;
+ struct art *arts = NULL;
switch (cmd) {
case ACPI_THERMAL_GET_TRT_COUNT:
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index dcb306ea14a4..65a98a97df07 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -335,7 +335,6 @@ static struct platform_driver int3400_thermal_driver = {
.remove = int3400_thermal_remove,
.driver = {
.name = "int3400 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(int3400_thermal_match),
},
};
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index a5d08c14ba24..c5cbc3af3a05 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -231,7 +231,6 @@ static struct platform_driver int3402_thermal_driver = {
.remove = int3402_thermal_remove,
.driver = {
.name = "int3402 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3402_thermal_match,
},
};
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 1bfa6a69e77a..0faf500d8a77 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -301,6 +301,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
{
struct int3403_sensor *obj = priv->priv;
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, int3403_notify);
thermal_zone_device_unregister(obj->tzone);
return 0;
}
@@ -369,6 +371,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
p = buf.pointer;
if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
printk(KERN_WARNING "Invalid PPSS data\n");
+ kfree(buf.pointer);
return -EFAULT;
}
@@ -381,6 +384,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
priv->priv = obj;
+ kfree(buf.pointer);
/* TODO: add ACPI notification support */
return result;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
new file mode 100644
index 000000000000..31bb553aac26
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -0,0 +1,309 @@
+/*
+ * processor_thermal_device.c
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+/* Broadwell-U/HSB thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
+#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+
+/* Braswell thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
+
+struct power_config {
+ u32 index;
+ u32 min_uw;
+ u32 max_uw;
+ u32 tmin_us;
+ u32 tmax_us;
+ u32 step_uw;
+};
+
+struct proc_thermal_device {
+ struct device *dev;
+ struct acpi_device *adev;
+ struct power_config power_limits[2];
+};
+
+enum proc_thermal_emum_mode_type {
+ PROC_THERMAL_NONE,
+ PROC_THERMAL_PCI,
+ PROC_THERMAL_PLATFORM_DEV
+};
+
+/*
+ * We can have only one type of enumeration, PCI or Platform,
+ * not both. So we don't need instance specific data.
+ */
+static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
+ PROC_THERMAL_NONE;
+
+#define POWER_LIMIT_SHOW(index, suffix) \
+static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pci_dev; \
+ struct platform_device *pdev; \
+ struct proc_thermal_device *proc_dev; \
+\
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
+ pdev = to_platform_device(dev); \
+ proc_dev = platform_get_drvdata(pdev); \
+ } else { \
+ pci_dev = to_pci_dev(dev); \
+ proc_dev = pci_get_drvdata(pci_dev); \
+ } \
+ return sprintf(buf, "%lu\n",\
+ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
+}
+
+POWER_LIMIT_SHOW(0, min_uw)
+POWER_LIMIT_SHOW(0, max_uw)
+POWER_LIMIT_SHOW(0, step_uw)
+POWER_LIMIT_SHOW(0, tmin_us)
+POWER_LIMIT_SHOW(0, tmax_us)
+
+POWER_LIMIT_SHOW(1, min_uw)
+POWER_LIMIT_SHOW(1, max_uw)
+POWER_LIMIT_SHOW(1, step_uw)
+POWER_LIMIT_SHOW(1, tmin_us)
+POWER_LIMIT_SHOW(1, tmax_us)
+
+static DEVICE_ATTR_RO(power_limit_0_min_uw);
+static DEVICE_ATTR_RO(power_limit_0_max_uw);
+static DEVICE_ATTR_RO(power_limit_0_step_uw);
+static DEVICE_ATTR_RO(power_limit_0_tmin_us);
+static DEVICE_ATTR_RO(power_limit_0_tmax_us);
+
+static DEVICE_ATTR_RO(power_limit_1_min_uw);
+static DEVICE_ATTR_RO(power_limit_1_max_uw);
+static DEVICE_ATTR_RO(power_limit_1_step_uw);
+static DEVICE_ATTR_RO(power_limit_1_tmin_us);
+static DEVICE_ATTR_RO(power_limit_1_tmax_us);
+
+static struct attribute *power_limit_attrs[] = {
+ &dev_attr_power_limit_0_min_uw.attr,
+ &dev_attr_power_limit_1_min_uw.attr,
+ &dev_attr_power_limit_0_max_uw.attr,
+ &dev_attr_power_limit_1_max_uw.attr,
+ &dev_attr_power_limit_0_step_uw.attr,
+ &dev_attr_power_limit_1_step_uw.attr,
+ &dev_attr_power_limit_0_tmin_us.attr,
+ &dev_attr_power_limit_1_tmin_us.attr,
+ &dev_attr_power_limit_0_tmax_us.attr,
+ &dev_attr_power_limit_1_tmax_us.attr,
+ NULL
+};
+
+static struct attribute_group power_limit_attribute_group = {
+ .attrs = power_limit_attrs,
+ .name = "power_limits"
+};
+
+static int proc_thermal_add(struct device *dev,
+ struct proc_thermal_device **priv)
+{
+ struct proc_thermal_device *proc_priv;
+ struct acpi_device *adev;
+ acpi_status status;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *elements, *ppcc;
+ union acpi_object *p;
+ int i;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+
+ status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buf.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ dev_err(dev, "Invalid PPCC data\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ if (!p->package.count) {
+ dev_err(dev, "Invalid PPCC package size\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+
+ proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ proc_priv->dev = dev;
+ proc_priv->adev = adev;
+
+ for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
+ elements = &(p->package.elements[i+1]);
+ if (elements->type != ACPI_TYPE_PACKAGE ||
+ elements->package.count != 6) {
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ ppcc = elements->package.elements;
+ proc_priv->power_limits[i].index = ppcc[0].integer.value;
+ proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
+ proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
+ proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
+ proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
+ proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
+ }
+
+ *priv = proc_priv;
+
+ ret = sysfs_create_group(&dev->kobj,
+ &power_limit_attribute_group);
+
+free_buffer:
+ kfree(buf.pointer);
+
+ return ret;
+}
+
+void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+{
+ sysfs_remove_group(&proc_priv->dev->kobj,
+ &power_limit_attribute_group);
+}
+
+static int int3401_add(struct platform_device *pdev)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
+ dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
+ return -ENODEV;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
+
+ return 0;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+ proc_thermal_remove(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static int proc_thermal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
+ dev_err(&pdev->dev, "error: enumerated as platform dev\n");
+ return -ENODEV;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret) {
+ pci_disable_device(pdev);
+ return ret;
+ }
+
+ pci_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PCI;
+
+ return 0;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+ proc_thermal_remove(pci_get_drvdata(pdev));
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+ .name = "proc_thermal",
+ .probe = proc_thermal_pci_probe,
+ .remove = proc_thermal_pci_remove,
+ .id_table = proc_thermal_pci_ids,
+};
+
+static const struct acpi_device_id int3401_device_ids[] = {
+ {"INT3401", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static struct platform_driver int3401_driver = {
+ .probe = int3401_add,
+ .remove = int3401_remove,
+ .driver = {
+ .name = "int3401 thermal",
+ .acpi_match_table = int3401_device_ids,
+ },
+};
+
+static int __init proc_thermal_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&int3401_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&proc_thermal_pci_driver);
+
+ return ret;
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ platform_driver_unregister(&int3401_driver);
+ pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
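As an aside on the new file above, a minimal userspace illustration of its token-pasting pattern: one macro stamps out a power_limit_<index>_<field> accessor per (index, field) pair so the attribute table stays declarative. The types, buffer interface, and sample values below are simplified stand-ins, not the driver's sysfs API.

```c
#include <stdio.h>

struct power_config {
	unsigned int min_uw;
	unsigned int max_uw;
};

static struct power_config power_limits[2] = {
	{ .min_uw = 500, .max_uw = 15000 },
	{ .min_uw = 800, .max_uw = 25000 },
};

#define POWER_LIMIT_SHOW(index, field)					\
static int power_limit_##index##_##field##_show(char *buf, size_t len)	\
{									\
	/* the driver multiplies by 1000 to report micro-units */	\
	return snprintf(buf, len, "%lu\n",				\
			(unsigned long)power_limits[index].field * 1000); \
}

POWER_LIMIT_SHOW(0, min_uw)
POWER_LIMIT_SHOW(1, max_uw)

int main(void)
{
	char buf[32];

	power_limit_0_min_uw_show(buf, sizeof(buf));
	printf("power_limit_0_min_uw: %s", buf);
	power_limit_1_max_uw_show(buf, sizeof(buf));
	printf("power_limit_1_max_uw: %s", buf);
	return 0;
}
```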
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index e98b4249187c..6ceebd659dd4 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
{ X86_VENDOR_INTEL, 6, 0x4c},
+ { X86_VENDOR_INTEL, 6, 0x56},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 1bcddfc60e91..9c6ce548e363 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -677,7 +677,6 @@ static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
static struct platform_driver rockchip_thermal_driver = {
.driver = {
.name = "rockchip-thermal",
- .owner = THIS_MODULE,
.pm = &rockchip_thermal_pm_ops,
.of_match_table = of_rockchip_thermal_match,
},
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index f760389a204c..c43306ecc0ab 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on ARCH_HAS_BANDGAP && OF
+ depends on OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index b6be572704a4..6dc3815cc73f 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -347,7 +347,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
{
int ret;
- struct cpumask mask_val;
struct exynos_thermal_zone *th_zone;
if (!sensor_conf || !sensor_conf->read_temperature) {
@@ -367,13 +366,14 @@ int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
* sensor
*/
if (sensor_conf->cooling_data.freq_clip_count > 0) {
- cpumask_set_cpu(0, &mask_val);
th_zone->cool_dev[th_zone->cool_dev_size] =
- cpufreq_cooling_register(&mask_val);
+ cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
- dev_err(sensor_conf->dev,
- "Failed to register cpufreq cooling device\n");
- ret = -EINVAL;
+ ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(sensor_conf->dev,
+ "Failed to register cpufreq cooling device: %d\n",
+ ret);
goto err_unregister;
}
th_zone->cool_dev_size++;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d44d91d681d4..d2f1e62a4232 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -927,7 +927,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
/* Register the sensor with thermal management interface */
ret = exynos_register_thermal(sensor_conf);
if (ret) {
- dev_err(&pdev->dev, "Failed to register thermal interface\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register thermal interface: %d\n",
+ ret);
goto err_clk;
}
data->reg_conf = sensor_conf;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 84fdf0792e27..87e0b0782023 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -930,7 +930,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
struct thermal_zone_device *pos1;
struct thermal_cooling_device *pos2;
unsigned long max_state;
- int result;
+ int result, ret;
if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
return -EINVAL;
@@ -947,7 +947,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (tz != pos1 || cdev != pos2)
return -EINVAL;
- cdev->ops->get_max_state(cdev, &max_state);
+ ret = cdev->ops->get_max_state(cdev, &max_state);
+ if (ret)
+ return ret;
/* lower default 0, upper default max_state */
lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 5fd03865e396..3fb054a10f6a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/thermal.h>
-#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>
@@ -407,17 +406,17 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
if (!data)
return -EINVAL;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(bgp->dev, "no cpufreq driver yet\n");
- return -EPROBE_DEFER;
- }
-
/* Register cooling device */
data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cool_dev)) {
- dev_err(bgp->dev,
- "Failed to register cpufreq cooling device\n");
- return PTR_ERR(data->cool_dev);
+ int ret = PTR_ERR(data->cool_dev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(bgp->dev,
+ "Failed to register cpu cooling device %d\n",
+ ret);
+
+ return ret;
}
ti_bandgap_set_sensor_data(bgp, id, data);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 255201f22126..7cc0122a18ce 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- u8 type;
struct vfio_pci_device *vdev;
struct iommu_group *group;
int ret;
- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
- if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
group = iommu_group_get(&pdev->dev);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 14419a8ccbb6..d415d69dc237 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
++headcount;
seg += in;
}
- heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ed71b5347a76..cb807d0ea498 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -713,9 +713,13 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EFAULT;
break;
}
- if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
- (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
- (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+
+ /* Make sure it's safe to cast pointers to vring types. */
+ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+ BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+ if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+ (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+ (a.log_guest_addr & (sizeof(u64) - 1))) {
r = -EINVAL;
break;
}
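For reference, a userspace sketch of the checks the vhost hunk switches to: a power-of-two alignment requirement is enforced with "addr & (align - 1)", and compile-time assertions document that casting the user addresses to the ring types is safe. The struct layouts below are rough stand-ins for the vring headers, and the alignment constants follow the virtio ring uapi values.

```c
#include <stdint.h>
#include <stdio.h>

#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE  4

/* Rough stand-ins for the ring headers, just to anchor the asserts. */
struct vring_avail {
	uint16_t flags;
	uint16_t idx;
	uint16_t ring[];
};
struct vring_used_elem {
	uint32_t id;
	uint32_t len;
};
struct vring_used {
	uint16_t flags;
	uint16_t idx;
	struct vring_used_elem ring[];
};

_Static_assert(_Alignof(struct vring_avail) <= VRING_AVAIL_ALIGN_SIZE,
	       "avail ring cast must be safe");
_Static_assert(_Alignof(struct vring_used) <= VRING_USED_ALIGN_SIZE,
	       "used ring cast must be safe");

static int addrs_ok(uint64_t avail, uint64_t used, uint64_t log)
{
	return !(avail & (VRING_AVAIL_ALIGN_SIZE - 1)) &&
	       !(used & (VRING_USED_ALIGN_SIZE - 1)) &&
	       !(log & (sizeof(uint64_t) - 1));
}

int main(void)
{
	printf("%d\n", addrs_ok(0x1000, 0x2004, 0x3008));	/* 1: all aligned */
	printf("%d\n", addrs_ok(0x1001, 0x2004, 0x3008));	/* 0: avail misaligned */
	return 0;
}
```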
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 900aa4ecd617..d6cab1fd9a47 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
cancel_delayed_work_sync(&info->deferred_work);
/* Run it immediately */
- err = schedule_delayed_work(&info->deferred_work, 0);
+ schedule_delayed_work(&info->deferred_work, 0);
mutex_unlock(&inode->i_mutex);
- return err;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 87accdb59c81..ac83ef5cfd7d 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -132,7 +132,6 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
.mX_max = 127,
.fint_min = 500000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 500000000,
.clkdco_low = 1000000000,
@@ -156,7 +155,6 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.mX_max = 127,
.fint_min = 620000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 750000000,
.clkdco_low = 1500000000,
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
index 50bc62c5d367..335ffac224b9 100644
--- a/drivers/video/fbdev/omap2/dss/pll.c
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -97,7 +97,8 @@ int dss_pll_enable(struct dss_pll *pll)
return 0;
err_enable:
- regulator_disable(pll->regulator);
+ if (pll->regulator)
+ regulator_disable(pll->regulator);
err_reg:
clk_disable_unprepare(pll->clkin);
return r;
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index d51a983075bc..5c2ccab5a958 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -342,6 +342,8 @@ static void sdi_init_output(struct platform_device *pdev)
out->output_type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+ /* We have SDI only on OMAP3, where it's on port 1 */
+ out->port_num = 1;
out->ops.sdi = &sdi_ops;
out->owner = THIS_MODULE;
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 940cd196eef5..10fbfd8ab963 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,6 +21,21 @@ static bool nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
+/*
+ * Logos are located in the initdata, and will be freed in kernel_init.
+ * Use late_init to mark the logos as freed to prevent any further use.
+ */
+
+static bool logos_freed;
+
+static int __init fb_logo_late_init(void)
+{
+ logos_freed = true;
+ return 0;
+}
+
+late_initcall(fb_logo_late_init);
+
/* logo's are marked __initdata. Use __init_refok to tell
* modpost that it is intended that this function uses data
* marked __initdata.
@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
{
const struct linux_logo *logo = NULL;
- if (nologo)
+ if (nologo || logos_freed)
return NULL;
if (depth >= 1) {
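A tiny userspace model of the guard the logo.c hunk adds: data released at a known point in the boot sequence is protected by a flag flipped from a late initcall, so later lookups return NULL instead of touching freed memory. The initcall ordering is simulated by explicit calls, and the data is a placeholder string rather than real logo structures.

```c
#include <stdbool.h>
#include <stdio.h>

static const char *logo_data = "penguin";	/* stands in for the __initdata logos */
static bool logos_freed;

static int fb_logo_late_init(void)
{
	logos_freed = true;		/* from now on the logos are gone */
	return 0;
}

static const char *fb_find_logo(bool nologo)
{
	if (nologo || logos_freed)
		return NULL;
	return logo_data;
}

int main(void)
{
	printf("before: %s\n", fb_find_logo(false) ? "found" : "none");
	fb_logo_late_init();		/* the kernel runs this as a late_initcall */
	printf("after:  %s\n", fb_find_logo(false) ? "found" : "none");
	return 0;
}
```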
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 2ef9529809d8..9756f21b809e 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_free_vectors(vdev);
kfree(vp_dev->vqs);
+ vp_dev->vqs = NULL;
}
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
return 0;
}
-void virtio_pci_release_dev(struct device *_d)
-{
- /*
- * No need for a release method as we allocate/free
- * all devices together with the pci devices.
- * Provide an empty one to avoid getting a warning from core.
- */
-}
-
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index adddb647b21d..5a497289b7e9 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
* - ignore the affinity request if we're using INTX
*/
int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
-void virtio_pci_release_dev(struct device *);
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6c76f0f5658c..a5486e65e04b 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
};
+static void virtio_pci_release_dev(struct device *_d)
+{
+ struct virtio_device *vdev = dev_to_virtio(_d);
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ /* As struct device is a kobject, it's not safe to
+ * free the memory (including the reference counter itself)

+ * until its release callback. */
+ kfree(vp_dev);
+}
+
/* the PCI probing function */
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
- kfree(vp_dev);
}
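To show the lifetime rule behind the change above in isolation: an embedded, reference-counted object must be freed from its release callback, not from the remove/teardown path, because other holders may still drop their references later. The names and the hand-rolled refcount below are illustrative, not the driver-core API.

```c
#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
	void (*release)(struct object *obj);
};

struct vp_device {
	struct object obj;		/* embedded, like struct virtio_device */
	int ioaddr;
};

static void vp_release(struct object *obj)
{
	/* Only now is it safe to free the containing structure. */
	struct vp_device *vp = (struct vp_device *)obj;

	printf("freeing vp_device\n");
	free(vp);
}

static void put_object(struct object *obj)
{
	if (--obj->refcount == 0)
		obj->release(obj);
}

int main(void)
{
	struct vp_device *vp = malloc(sizeof(*vp));

	vp->obj.refcount = 2;		/* core + one other holder */
	vp->obj.release = vp_release;

	put_object(&vp->obj);		/* "remove": must NOT free here */
	put_object(&vp->obj);		/* last reference: release frees */
	return 0;
}
```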
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 2d3e32ebfd15..8729cf68d2fe 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
{
int ret;
int type;
- struct btrfs_tree_block_info *info;
struct btrfs_extent_inline_ref *eiref;
if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
}
/* we can treat both ref types equally here */
- info = (struct btrfs_tree_block_info *)(ei + 1);
*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
- *out_level = btrfs_tree_block_level(eb, info);
+
+ if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+ struct btrfs_tree_block_info *info;
+
+ info = (struct btrfs_tree_block_info *)(ei + 1);
+ *out_level = btrfs_tree_block_level(eb, info);
+ } else {
+ ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+ *out_level = (u8)key->offset;
+ }
if (ret == 1)
*ptr = (unsigned long)-1;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 054577bddaf2..de4e70fb3cbb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
struct btrfs_delayed_node *delayed_node;
+ /*
+ * we don't do delayed inode updates during log recovery because it
+ * leads to enospc problems. This means we also can't do
+ * delayed inode refs
+ */
+ if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+ return -EAGAIN;
+
delayed_node = btrfs_get_or_create_delayed_node(inode);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a80b97100d90..15116585e714 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
- if (ret < 0)
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
goto fail;
- BUG_ON(ret); /* Corruption */
+ }
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
fail:
- if (ret) {
+ if (ret)
btrfs_abort_transaction(trans, root, ret);
- return ret;
- }
- return 0;
+ return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e687bb0dc73a..8bf326affb94 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
out_fail:
btrfs_end_transaction(trans, root);
- if (drop_on_err)
+ if (drop_on_err) {
+ inode_dec_link_count(inode);
iput(inode);
+ }
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f2bb13a23f86..9e1569ffbf6e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
flags, gen, mirror_num,
have_csum ? csum : NULL);
-skip:
if (ret)
return ret;
+skip:
len -= l;
logical += l;
physical += l;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f5013d92a7e6..c81c0e004588 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
}
- dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+ dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
inode, ceph_vinop(inode), len, locked_page);
if (len > 0) {
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6e139111fdb2..22b289a3b1c4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,16 +661,16 @@ set_credits(struct TCP_Server_Info *server, const int val)
server->ops->set_credits(server, val);
}
-static inline __u64
+static inline __le64
get_next_mid64(struct TCP_Server_Info *server)
{
- return server->ops->get_next_mid(server);
+ return cpu_to_le64(server->ops->get_next_mid(server));
}
static inline __le16
get_next_mid(struct TCP_Server_Info *server)
{
- __u16 mid = get_next_mid64(server);
+ __u16 mid = server->ops->get_next_mid(server);
/*
* The value in the SMB header should be little endian for easy
* on-the-wire decoding.
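A short userspace sketch of the endianness rule the cifsglob.h hunk encodes: the 64-bit mid is kept in host order internally and converted to little-endian only when placed on the wire, then converted back when matching responses. The glibc <endian.h> helpers stand in for cpu_to_le64()/le64_to_cpu().

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t next_mid = 41;

static uint64_t wire_mid_for_request(void)
{
	return htole64(++next_mid);	/* on-the-wire representation */
}

static int mids_match(uint64_t pending_host_mid, uint64_t wire_mid)
{
	return pending_host_mid == le64toh(wire_mid);	/* compare in host order */
}

int main(void)
{
	uint64_t wire = wire_mid_for_request();

	printf("match: %d\n", mids_match(42, wire));	/* prints 1 */
	return 0;
}
```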
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index b333ff60781d..abae6dd2c6b9 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -926,6 +926,7 @@ cifs_NTtimeToUnix(__le64 ntutc)
/* Subtract the NTFS time offset, then convert to 1s intervals. */
s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+ u64 abs_t;
/*
* Unfortunately can not use normal 64 bit division on 32 bit arch, but
@@ -933,13 +934,14 @@ cifs_NTtimeToUnix(__le64 ntutc)
* to special case them
*/
if (t < 0) {
- t = -t;
- ts.tv_nsec = (long)(do_div(t, 10000000) * 100);
+ abs_t = -t;
+ ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
ts.tv_nsec = -ts.tv_nsec;
- ts.tv_sec = -t;
+ ts.tv_sec = -abs_t;
} else {
- ts.tv_nsec = (long)do_div(t, 10000000) * 100;
- ts.tv_sec = t;
+ abs_t = t;
+ ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
+ ts.tv_sec = abs_t;
}
return ts;
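To make the netmisc.c fix concrete: do_div()-style unsigned 64-bit division must not be fed a negative value, so the conversion takes the absolute value first and restores the sign afterwards. Plain '/' and '%' stand in for do_div() in this userspace sketch, and the timespec type is defined locally.

```c
#include <inttypes.h>
#include <stdio.h>

struct timespec64 { int64_t tv_sec; long tv_nsec; };

static struct timespec64 nt_interval_to_ts(int64_t t /* 100ns units */)
{
	struct timespec64 ts;
	uint64_t abs_t;

	if (t < 0) {
		abs_t = -(uint64_t)t;			/* divide the magnitude only */
		ts.tv_nsec = -(long)((abs_t % 10000000) * 100);
		ts.tv_sec = -(int64_t)(abs_t / 10000000);
	} else {
		abs_t = t;
		ts.tv_nsec = (long)((abs_t % 10000000) * 100);
		ts.tv_sec = abs_t / 10000000;
	}
	return ts;
}

int main(void)
{
	struct timespec64 ts = nt_interval_to_ts(-12345678);

	/* -1 s and -234567800 ns, i.e. roughly -1.23 s */
	printf("%" PRId64 " s, %ld ns\n", ts.tv_sec, ts.tv_nsec);
	return 0;
}
```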
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8eaf20a80649..c295338e0a98 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -69,7 +69,8 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
* Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
*
* Find the dentry that matches "name". If there isn't one, create one. If it's
- * a negative dentry or the uniqueid changed, then drop it and recreate it.
+ * a negative dentry or the uniqueid or filetype(mode) changed,
+ * then drop it and recreate it.
*/
static void
cifs_prime_dcache(struct dentry *parent, struct qstr *name,
@@ -97,8 +98,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
- /* update inode in place if i_ino didn't change */
- if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ /* update inode in place
+ * if both i_ino and i_mode didn't change */
+ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
+ (inode->i_mode & S_IFMT) ==
+ (fattr->cf_mode & S_IFMT)) {
cifs_fattr_to_inode(inode, fattr);
goto out;
}
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index f1cefc9763ed..689f035915cf 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -32,12 +32,14 @@
static int
check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
{
+ __u64 wire_mid = le64_to_cpu(hdr->MessageId);
+
/*
* Make sure that this really is an SMB, that it is a response,
* and that the message ids match.
*/
if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
- (mid == hdr->MessageId)) {
+ (mid == wire_mid)) {
if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
return 0;
else {
@@ -51,11 +53,11 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
cifs_dbg(VFS, "Bad protocol string signature header %x\n",
*(unsigned int *) hdr->ProtocolId);
- if (mid != hdr->MessageId)
+ if (mid != wire_mid)
cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
- mid, hdr->MessageId);
+ mid, wire_mid);
}
- cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", hdr->MessageId);
+ cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
return 1;
}
@@ -95,7 +97,7 @@ smb2_check_message(char *buf, unsigned int length)
{
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
- __u64 mid = hdr->MessageId;
+ __u64 mid = le64_to_cpu(hdr->MessageId);
__u32 len = get_rfc1002_length(buf);
__u32 clc_len; /* calculated length */
int command;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 93fd0586f9ec..96b5d40a2ece 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -176,10 +176,11 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
struct mid_q_entry *mid;
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ __u64 wire_mid = le64_to_cpu(hdr->MessageId);
spin_lock(&GlobalMid_Lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
- if ((mid->mid == hdr->MessageId) &&
+ if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
(mid->command == hdr->Command)) {
spin_unlock(&GlobalMid_Lock);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ce858477002a..70867d54fb8b 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -110,7 +110,7 @@ struct smb2_hdr {
__le16 CreditRequest; /* CreditResponse */
__le32 Flags;
__le32 NextCommand;
- __u64 MessageId; /* opaque - so can stay little endian */
+ __le64 MessageId;
__le32 ProcessId;
__u32 TreeId; /* opaque - so do not make little endian */
__u64 SessionId; /* opaque - so do not make little endian */
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 5111e7272db6..d4c5b6f109a7 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -490,7 +490,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer,
return temp;
else {
memset(temp, 0, sizeof(struct mid_q_entry));
- temp->mid = smb_buffer->MessageId; /* always LE */
+ temp->mid = le64_to_cpu(smb_buffer->MessageId);
temp->pid = current->pid;
temp->command = smb_buffer->Command; /* Always LE */
temp->when_alloc = jiffies;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e5d3eadf47b1..bed43081720f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return __generic_block_fiemap(inode, fieinfo, start, len,
- ext4_get_block);
+ return generic_block_fiemap(inode, fieinfo, start, len,
+ ext4_get_block);
if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
return -EBADR;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 513c12cf444c..8131be8c0af3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* we determine this extent as a data or a hole according to whether the
* page cache has data or not.
*/
-static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
- loff_t endoff, loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+ int whence,
+ struct ext4_map_blocks *map,
+ loff_t *offset)
{
struct pagevec pvec;
+ unsigned int blkbits;
pgoff_t index;
pgoff_t end;
+ loff_t endoff;
loff_t startoff;
loff_t lastoff;
int found = 0;
+ blkbits = inode->i_sb->s_blocksize_bits;
startoff = *offset;
lastoff = startoff;
-
+ endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
index = startoff >> PAGE_CACHE_SHIFT;
end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct fiemap_extent_info fie;
- struct fiemap_extent ext[2];
- loff_t next;
- int i, ret = 0;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t dataoff, isize;
+ int blkbits;
+ int ret = 0;
mutex_lock(&inode->i_mutex);
- if (offset >= inode->i_size) {
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
- fie.fi_flags = 0;
- fie.fi_extents_max = 2;
- fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
- while (1) {
- mm_segment_t old_fs = get_fs();
-
- fie.fi_extents_mapped = 0;
- memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
-
- set_fs(get_ds());
- ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
- set_fs(old_fs);
- if (ret)
+
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ dataoff = offset;
+
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ if (last != start)
+ dataoff = (loff_t)last << blkbits;
break;
+ }
- /* No extents found, EOF */
- if (!fie.fi_extents_mapped) {
- ret = -ENXIO;
+ /*
+ * If there is a delayed extent at this offset,
+ * treat it as data.
+ */
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+ if (last != start)
+ dataoff = (loff_t)last << blkbits;
break;
}
- for (i = 0; i < fie.fi_extents_mapped; i++) {
- next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
- if (offset < (loff_t)ext[i].fe_logical)
- offset = (loff_t)ext[i].fe_logical;
- /*
- * If extent is not unwritten, then it contains valid
- * data, mapped or delayed.
- */
- if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
- goto out;
+ /*
+ * If there is an unwritten extent at this offset,
+ * it counts as data or a hole depending on whether
+ * the page cache has data.
+ */
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+ &map, &dataoff);
+ if (unwritten)
+ break;
+ }
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
- next, &offset))
- goto out;
+ last++;
+ dataoff = (loff_t)last << blkbits;
+ } while (last <= end);
- if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
- ret = -ENXIO;
- goto out;
- }
- offset = next;
- }
- }
- if (offset > inode->i_size)
- offset = inode->i_size;
-out:
mutex_unlock(&inode->i_mutex);
- if (ret)
- return ret;
- return vfs_setpos(file, offset, maxsize);
+ if (dataoff > isize)
+ return -ENXIO;
+
+ return vfs_setpos(file, dataoff, maxsize);
}
/*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
*/
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct fiemap_extent_info fie;
- struct fiemap_extent ext[2];
- loff_t next;
- int i, ret = 0;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t holeoff, isize;
+ int blkbits;
+ int ret = 0;
mutex_lock(&inode->i_mutex);
- if (offset >= inode->i_size) {
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
- fie.fi_flags = 0;
- fie.fi_extents_max = 2;
- fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
- while (1) {
- mm_segment_t old_fs = get_fs();
-
- fie.fi_extents_mapped = 0;
- memset(ext, 0, sizeof(*ext));
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ holeoff = offset;
- set_fs(get_ds());
- ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
- set_fs(old_fs);
- if (ret)
- break;
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ last += ret;
+ holeoff = (loff_t)last << blkbits;
+ continue;
+ }
- /* No extents found */
- if (!fie.fi_extents_mapped)
- break;
+ /*
+ * If there is a delayed extent at this offset,
+ * skip over it.
+ */
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+ last = es.es_lblk + es.es_len;
+ holeoff = (loff_t)last << blkbits;
+ continue;
+ }
- for (i = 0; i < fie.fi_extents_mapped; i++) {
- next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
- /*
- * If extent is not unwritten, then it contains valid
- * data, mapped or delayed.
- */
- if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
- if (offset < (loff_t)ext[i].fe_logical)
- goto out;
- offset = next;
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or as a hole depending on
+ * whether the page cache has data for it.
+ */
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+ &map, &holeoff);
+ if (!unwritten) {
+ last += ret;
+ holeoff = (loff_t)last << blkbits;
continue;
}
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
- next, &offset))
- goto out;
-
- offset = next;
- if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
- goto out;
}
- }
- if (offset > inode->i_size)
- offset = inode->i_size;
-out:
+
+ /* found a hole */
+ break;
+ } while (last <= end);
+
mutex_unlock(&inode->i_mutex);
- if (ret)
- return ret;
- return vfs_setpos(file, offset, maxsize);
+ if (holeoff > isize)
+ holeoff = isize;
+
+ return vfs_setpos(file, holeoff, maxsize);
}
/*
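The rewritten ext4_seek_data() and ext4_seek_hole() answer SEEK_DATA/SEEK_HOLE by walking ext4_map_blocks(), the extent-status cache and (for unwritten extents) the page cache, instead of faking an ext4_fiemap() call under set_fs(). What user space sees is unchanged; a minimal sketch of the interface these functions serve, assuming a test file named sparse.img:

/* Enumerate data segments of a sparse file with lseek(SEEK_DATA/SEEK_HOLE).
 * Illustrative only; the file name is an assumption. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t end, data, hole = 0;
	int fd = open("sparse.img", O_RDONLY);

	if (fd < 0)
		return 1;
	end = lseek(fd, 0, SEEK_END);
	while (hole < end) {
		data = lseek(fd, hole, SEEK_DATA);	/* -1/ENXIO: no more data */
		if (data < 0)
			break;
		hole = lseek(fd, data, SEEK_HOLE);	/* EOF also counts as a hole */
		printf("data: %lld..%lld\n", (long long)data, (long long)hole);
	}
	close(fd);
	return 0;
}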
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index bf76f405a5f9..8a8ec6293b19 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb)
return -EPERM;
/*
+ * If we are not using the primary superblock/GDT copy don't resize,
+ * because the user tools have no way of handling this. Probably a
+ * bad time to do it anyways.
+ */
+ if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+ ext4_warning(sb, "won't resize using backup superblock at %llu",
+ (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+ return -EPERM;
+ }
+
+ /*
* We are not allowed to do online-resizing on a filesystem mounted
* with error, because it can destroy the filesystem easily.
*/
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
"EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
gdb_num);
- /*
- * If we are not using the primary superblock/GDT copy don't resize,
- * because the user tools have no way of handling this. Probably a
- * bad time to do it anyways.
- */
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
- ext4_warning(sb, "won't resize using backup superblock at %llu",
- (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
- return -EPERM;
- }
-
gdb_bh = sb_bread(sb, gdblock);
if (!gdb_bh)
return -EIO;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 43c92b1685cb..74c5f53595fb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
- ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+ ext4_warning(sb, "metadata_csum and uninit_bg are "
"redundant flags; please run fsck.");
/* Check for a known checksum algorithm */
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 99d440a4a6ba..ee85cd4e136a 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE
+ __FMODE_EXEC | O_PATH | __O_TMPFILE |
+ __FMODE_NONOTIFY
));
fasync_cache = kmem_cache_create("fasync_cache",
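The fcntl_init() hunk bumps the expected bit count from 20 to 21 because __FMODE_NONOTIFY now takes part in the HWEIGHT32() sanity check on the O_*/FMODE flag space. The same build-time "count the distinct bits" idea can be sketched in plain C with GCC or Clang, whose popcount builtin folds to a constant; the FLAG_* values below are made up:

/* Build-time check that three single-bit flags really occupy three
 * distinct bits, analogous to the BUILD_BUG_ON(HWEIGHT32(...)) above. */
#include <assert.h>

#define FLAG_A 0x01
#define FLAG_B 0x02
#define FLAG_C 0x10

static_assert(__builtin_popcount(FLAG_A | FLAG_B | FLAG_C) == 3,
	      "flag bits overlap or are miscounted");

int main(void) { return 0; }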
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index bb63254ed848..735d7522a3a9 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -362,6 +362,9 @@ repeat:
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
+ /* Invalid length of ER tag id? */
+ if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+ goto out;
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3550a9c87616..c06a1ba80d73 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
status = nfs4_setlease(dp);
goto out;
}
- atomic_inc(&fp->fi_delegees);
if (fp->fi_had_conflict) {
status = -EAGAIN;
goto out_unlock;
}
+ atomic_inc(&fp->fi_delegees);
hash_delegation_locked(dp, fp);
status = 0;
out_unlock:
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index c991616acca9..bff8567aa42d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
struct fsnotify_event *kevent;
char __user *start;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
start = buf;
group = file->private_data;
pr_debug("%s: group=%p\n", __func__, group);
+ add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
if (start != buf)
break;
- schedule();
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
continue;
}
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
buf += ret;
count -= ret;
}
+ remove_wait_queue(&group->notification_waitq, &wait);
- finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
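The fanotify_read() rework swaps the prepare_to_wait()/schedule()/finish_wait() idiom for wait_woken(), so a wakeup arriving between the event check and the sleep is not lost. The bare pattern, reduced to a kernel-context sketch (not buildable on its own; the notification mutex and the copy-to-user path of the real function are omitted):

/* Register on the waitqueue once, then re-check the condition around
 * wait_woken(); a wakeup delivered in between sets WQ_FLAG_WOKEN and the
 * next wait_woken() returns immediately. */
DEFINE_WAIT_FUNC(wait, woken_wake_function);

add_wait_queue(&group->notification_waitq, &wait);
while (!(kevent = get_one_event(group, count))) {
	if (signal_pending(current))
		break;
	wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&group->notification_waitq, &wait);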
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 79b5af5e6a7b..cecd875653e4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2023,11 +2023,8 @@ leave:
dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
- if (ret < 0) {
+ if (ret < 0)
mlog_errno(ret);
- if (newlock)
- dlm_lock_put(newlock);
- }
return ret;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index b931e04e3388..914c121ec890 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
struct inode *inode,
const char *symname);
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+ struct buffer_head **bh1,
+ struct inode *inode1,
+ struct buffer_head **bh2,
+ struct inode *inode2,
+ int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
/* An orphan dir name is an 8 byte value, printed as a hex string */
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
{
handle_t *handle;
struct inode *inode = old_dentry->d_inode;
+ struct inode *old_dir = old_dentry->d_parent->d_inode;
int err;
struct buffer_head *fe_bh = NULL;
+ struct buffer_head *old_dir_bh = NULL;
struct buffer_head *parent_fe_bh = NULL;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
dquot_initialize(dir);
- err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+ err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+ &parent_fe_bh, dir, 0);
if (err < 0) {
if (err != -ENOENT)
mlog_errno(err);
return err;
}
+ /* Make sure both dirs have bhs;
+ * get an extra ref on old_dir_bh if old == new. */
+ if (!parent_fe_bh) {
+ if (old_dir_bh) {
+ parent_fe_bh = old_dir_bh;
+ get_bh(parent_fe_bh);
+ } else {
+ mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+ err = -EIO;
+ goto out;
+ }
+ }
+
if (!dir->i_nlink) {
err = -ENOENT;
goto out;
}
- err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+ err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
old_dentry->d_name.len, &old_de_ino);
if (err) {
err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
ocfs2_inode_unlock(inode, 1);
out:
- ocfs2_inode_unlock(dir, 1);
+ ocfs2_double_unlock(old_dir, dir);
brelse(fe_bh);
brelse(parent_fe_bh);
+ brelse(old_dir_bh);
ocfs2_free_dir_lookup_result(&lookup);
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
}
/*
- * The only place this should be used is rename!
+ * The only place this should be used is rename and link!
* if they have the same id, then the 1st one is the only one locked.
*/
static int ocfs2_double_lock(struct ocfs2_super *osb,
struct buffer_head **bh1,
struct inode *inode1,
struct buffer_head **bh2,
- struct inode *inode2)
+ struct inode *inode2,
+ int rename)
{
int status;
int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
}
/* lock id2 */
status = ocfs2_inode_lock_nested(inode2, bh2, 1,
- OI_LS_RENAME1);
+ rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
}
/* lock id1 */
- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+ status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+ rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
if (status < 0) {
/*
* An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
/* if old and new are the same, this'll just do one lock. */
status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
- &new_dir_bh, new_dir);
+ &new_dir_bh, new_dir, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
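ocfs2_double_lock() is now shared by link and rename, and the comment above it states the invariant that keeps it deadlock-free: if both inodes are the same only the first is locked, otherwise the pair is always taken in one fixed order. The same idea in a self-contained user-space sketch with pthread mutexes (invented names, not ocfs2 API):

/* Deadlock-free double lock: lock once if the objects are identical,
 * otherwise always lock the smaller id first so two tasks locking the
 * same pair agree on the order. Illustrative only. */
#include <pthread.h>

struct obj { unsigned long id; pthread_mutex_t lock; };

static void double_lock(struct obj *a, struct obj *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
		return;
	}
	if (a->id > b->id) {
		struct obj *t = a; a = b; b = t;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

static void double_unlock(struct obj *a, struct obj *b)
{
	if (a != b)
		pthread_mutex_unlock(&b->lock);
	pthread_mutex_unlock(&a->lock);
}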
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index a012c51caffd..05e90edd1992 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -57,6 +57,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
sector_t offset;
int i, num, ret = 0;
struct extent_position epos = { NULL, 0, {0, 0} };
+ struct super_block *sb = dir->i_sb;
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
@@ -76,16 +77,16 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
if (nf_pos == 0)
nf_pos = udf_ext0_offset(dir);
- fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
+ fibh.soffset = fibh.eoffset = nf_pos & (sb->s_blocksize - 1);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
+ if (inode_bmap(dir, nf_pos >> sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset)
!= (EXT_RECORDED_ALLOCATED >> 30)) {
ret = -ENOENT;
goto out;
}
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+ block = udf_get_lb_pblock(sb, &eloc, offset);
+ if ((++offset << sb->s_blocksize_bits) < elen) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type ==
@@ -95,18 +96,18 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
offset = 0;
}
- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
+ if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) {
ret = -EIO;
goto out;
}
- if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
- i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
- if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
- i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
+ if (!(offset & ((16 >> (sb->s_blocksize_bits - 9)) - 1))) {
+ i = 16 >> (sb->s_blocksize_bits - 9);
+ if (i + offset > (elen >> sb->s_blocksize_bits))
+ i = (elen >> sb->s_blocksize_bits) - offset;
for (num = 0; i > 0; i--) {
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
- tmp = udf_tgetblk(dir->i_sb, block);
+ block = udf_get_lb_pblock(sb, &eloc, offset + i);
+ tmp = udf_tgetblk(sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
@@ -152,12 +153,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
continue;
}
@@ -167,12 +168,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
continue;
}
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+ flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
if (!flen)
continue;
tloc = lelb_to_cpu(cfi.icb.extLocation);
- iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
+ iblock = udf_get_lb_pblock(sb, &tloc, 0);
if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
goto out;
} /* end while */
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c9b4df5810d5..5bc71d9a674a 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1489,6 +1489,20 @@ reread:
}
inode->i_generation = iinfo->i_unique;
+ /* Sanity checks for files in ICB so that we don't get confused later */
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ /*
+ * For a file in an ICB the data is stored in the allocation
+ * descriptor, so the sizes should match.
+ */
+ if (iinfo->i_lenAlloc != inode->i_size)
+ goto out;
+ /* File in ICB has to fit in there... */
+ if (inode->i_size > inode->i_sb->s_blocksize -
+ udf_file_entry_alloc_offset(inode))
+ goto out;
+ }
+
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c12e260fd6c4..33b246b82c98 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -159,18 +159,19 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
struct udf_inode_info *dinfo = UDF_I(dir);
int isdotdot = child->len == 2 &&
child->name[0] == '.' && child->name[1] == '.';
+ struct super_block *sb = dir->i_sb;
size = udf_ext0_offset(dir) + dir->i_size;
f_pos = udf_ext0_offset(dir);
fibh->sbh = fibh->ebh = NULL;
- fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
+ fibh->soffset = fibh->eoffset = f_pos & (sb->s_blocksize - 1);
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+ if (inode_bmap(dir, f_pos >> sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
goto out_err;
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+ block = udf_get_lb_pblock(sb, &eloc, offset);
+ if ((++offset << sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
@@ -178,7 +179,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
} else
offset = 0;
- fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
+ fibh->sbh = fibh->ebh = udf_tread(sb, block);
if (!fibh->sbh)
goto out_err;
}
@@ -217,12 +218,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
continue;
}
@@ -233,7 +234,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
if (!lfi)
continue;
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+ flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
if (flen && udf_match(flen, fname, child->len, child->name))
goto out_ok;
}
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 6fb7945c1e6e..ac10ca939f26 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -30,49 +30,73 @@
#include <linux/buffer_head.h>
#include "udf_i.h"
-static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
- int fromlen, unsigned char *to)
+static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
+ int fromlen, unsigned char *to, int tolen)
{
struct pathComponent *pc;
int elen = 0;
+ int comp_len;
unsigned char *p = to;
+ /* Reserve one byte for terminating \0 */
+ tolen--;
while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
+ elen += sizeof(struct pathComponent);
switch (pc->componentType) {
case 1:
/*
* Symlink points to some place which should be agreed
* upon between originator and receiver of the media. Ignore.
*/
- if (pc->lengthComponentIdent > 0)
+ if (pc->lengthComponentIdent > 0) {
+ elen += pc->lengthComponentIdent;
break;
+ }
/* Fall through */
case 2:
+ if (tolen == 0)
+ return -ENAMETOOLONG;
p = to;
*p++ = '/';
+ tolen--;
break;
case 3:
+ if (tolen < 3)
+ return -ENAMETOOLONG;
memcpy(p, "../", 3);
p += 3;
+ tolen -= 3;
break;
case 4:
+ if (tolen < 2)
+ return -ENAMETOOLONG;
memcpy(p, "./", 2);
p += 2;
+ tolen -= 2;
/* that would be . - just ignore */
break;
case 5:
- p += udf_get_filename(sb, pc->componentIdent, p,
- pc->lengthComponentIdent);
+ elen += pc->lengthComponentIdent;
+ if (elen > fromlen)
+ return -EIO;
+ comp_len = udf_get_filename(sb, pc->componentIdent,
+ pc->lengthComponentIdent,
+ p, tolen);
+ p += comp_len;
+ tolen -= comp_len;
+ if (tolen == 0)
+ return -ENAMETOOLONG;
*p++ = '/';
+ tolen--;
break;
}
- elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
}
if (p > to + 1)
p[-1] = '\0';
else
p[0] = '\0';
+ return 0;
}
static int udf_symlink_filler(struct file *file, struct page *page)
@@ -80,11 +104,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
- int err = -EIO;
+ int err;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
uint32_t pos;
+ /* We don't support symlinks longer than one block */
+ if (inode->i_size > inode->i_sb->s_blocksize) {
+ err = -ENAMETOOLONG;
+ goto out_unmap;
+ }
+
iinfo = UDF_I(inode);
pos = udf_block_map(inode, 0);
@@ -94,14 +124,18 @@ static int udf_symlink_filler(struct file *file, struct page *page)
} else {
bh = sb_bread(inode->i_sb, pos);
- if (!bh)
- goto out;
+ if (!bh) {
+ err = -EIO;
+ goto out_unlock_inode;
+ }
symlink = bh->b_data;
}
- udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
+ err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
brelse(bh);
+ if (err)
+ goto out_unlock_inode;
up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
@@ -109,9 +143,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
unlock_page(page);
return 0;
-out:
+out_unlock_inode:
up_read(&iinfo->i_data_sem);
SetPageError(page);
+out_unmap:
kunmap(page);
unlock_page(page);
return err;
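udf_pc_to_char() now tracks the space left in the output buffer (tolen) and fails with -ENAMETOOLONG instead of writing past it, and udf_symlink_filler() refuses symlinks larger than one block up front. The bounded-append pattern on its own, as a self-contained sketch (buffer size and path components are made up):

/* Every append checks the remaining space first, mirroring the tolen
 * accounting above; one byte stays reserved for the trailing '\0'. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int append(char *dst, int *left, const char *piece)
{
	int len = strlen(piece);

	if (len + 1 > *left)
		return -ENAMETOOLONG;
	memcpy(dst + strlen(dst), piece, len + 1);
	*left -= len;
	return 0;
}

int main(void)
{
	char path[16] = "";
	int left = sizeof(path);
	const char *parts[] = { "/", "a", "/", "averylongcomponent" };

	for (unsigned i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (append(path, &left, parts[i])) {
			puts("-ENAMETOOLONG");
			return 1;
		}
	}
	puts(path);
	return 0;
}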
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 1cc3c993ebd0..47bb3f5ca360 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -211,7 +211,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
}
/* unicode.c */
-extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
+extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
+ int);
extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
int);
extern int udf_build_ustr(struct ustr *, dstring *, int);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index afd470e588ff..b84fee372734 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -28,7 +28,8 @@
#include "udf_sb.h"
-static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
+static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
+ int);
static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
{
@@ -333,8 +334,8 @@ try_again:
return u_len + 1;
}
-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
- int flen)
+int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
+ uint8_t *dname, int dlen)
{
struct ustr *filename, *unifilename;
int len = 0;
@@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
if (!unifilename)
goto out1;
- if (udf_build_ustr_exact(unifilename, sname, flen))
+ if (udf_build_ustr_exact(unifilename, sname, slen))
goto out2;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
@@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
} else
goto out2;
- len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
+ len = udf_translate_to_linux(dname, dlen,
+ filename->u_name, filename->u_len,
unifilename->u_name, unifilename->u_len);
out2:
kfree(unifilename);
@@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
#define EXT_MARK '.'
#define CRC_MARK '#'
#define EXT_SIZE 5
+/* Number of chars we need to store the generated CRC to make the filename unique */
+#define CRC_LEN 5
-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
- int udfLen, uint8_t *fidName,
- int fidNameLen)
+static int udf_translate_to_linux(uint8_t *newName, int newLen,
+ uint8_t *udfName, int udfLen,
+ uint8_t *fidName, int fidNameLen)
{
int index, newIndex = 0, needsCRC = 0;
int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -439,7 +443,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
newExtIndex = newIndex;
}
}
- if (newIndex < 256)
+ if (newIndex < newLen)
newName[newIndex++] = curr;
else
needsCRC = 1;
@@ -467,13 +471,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
}
ext[localExtIndex++] = curr;
}
- maxFilenameLen = 250 - localExtIndex;
+ maxFilenameLen = newLen - CRC_LEN - localExtIndex;
if (newIndex > maxFilenameLen)
newIndex = maxFilenameLen;
else
newIndex = newExtIndex;
- } else if (newIndex > 250)
- newIndex = 250;
+ } else if (newIndex > newLen - CRC_LEN)
+ newIndex = newLen - CRC_LEN;
newName[newIndex++] = CRC_MARK;
valueCRC = crc_itu_t(0, fidName, fidNameLen);
newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3ca9b751f122..b95dc32a6e6b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
- u32 apic_id;
- u32 id;
+ u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
+ u32 id; /* CPU logical ID allocated by OS */
u32 pblk;
int performance_platform_limit;
int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int apic_id, u32 acpi_id);
+int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_pdc.c */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8ba35c622e22..e1b2e8b98af7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -901,11 +901,15 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 780511a459c0..1e6ae1458f7a 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -119,13 +119,6 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
-
- /**
- * dumb - created as dumb buffer
- * Whether the gem object was created using the dumb buffer interface
- * as such it may not be used for GPU rendering.
- */
- bool dumb;
};
void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index 59822a995858..b5e6b0069ac7 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -11,7 +11,7 @@
#define _DT_BINDINGS_THERMAL_THERMAL_H
/* On cooling devices upper and lower limits */
-#define THERMAL_NO_LIMIT (-1UL)
+#define THERMAL_NO_LIMIT (~0)
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 856d381b1d5b..d459cd17b477 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
-int acpi_unmap_lsapic(int cpu);
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 0c04917c2f12..af84234e1f6e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -47,6 +47,7 @@ struct sk_buff;
struct audit_krule {
int vers_ops;
+ u32 pflags;
u32 flags;
u32 listnr;
u32 action;
@@ -64,6 +65,9 @@ struct audit_krule {
u64 prio;
};
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY 0x1
+
struct audit_field {
u32 type;
union {
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5d86416d35f2..61b19c46bdb3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
struct ceph_osd_data osd_data;
} extent;
struct {
- __le32 name_len;
- __le32 value_len;
+ u32 name_len;
+ u32 value_len;
__u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
__u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
struct ceph_osd_data osd_data;
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c303d383def1..bd955270d5aa 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -50,7 +50,7 @@ static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
#endif
@@ -65,13 +65,13 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
static inline struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index a07e087f54b2..ab70f3bc44ad 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,7 +53,6 @@ struct cpuidle_state {
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
/**
* cpuidle_get_last_residency - retrieves the last state's residency time
* @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
*/
static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
{
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f90c0282c114..42efe13077b6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
+#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 290db1269c4c..75ae2e2631fc 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,11 +13,54 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+/* Shifted versions of the command enable bits are used if the command
+ * has no arguments (see kdb_check_flags). This allows commands, such as
+ * go, to have different permissions depending upon whether they are
+ * called with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
typedef enum {
- KDB_REPEAT_NONE = 0, /* Do not repeat this command */
- KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
- KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
-} kdb_repeat_t;
+ KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+ KDB_ENABLE_MEM_READ = (1 << 1),
+ KDB_ENABLE_MEM_WRITE = (1 << 2),
+ KDB_ENABLE_REG_READ = (1 << 3),
+ KDB_ENABLE_REG_WRITE = (1 << 4),
+ KDB_ENABLE_INSPECT = (1 << 5),
+ KDB_ENABLE_FLOW_CTRL = (1 << 6),
+ KDB_ENABLE_SIGNAL = (1 << 7),
+ KDB_ENABLE_REBOOT = (1 << 8),
+ /* User-exposed values stop here; all remaining flags are
+ * used exclusively to describe a command's behaviour.
+ */
+
+ KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+ KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+ KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+ KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+ KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
typedef int (*kdb_func_t)(int, const char **);
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
#define KDB_BADLENGTH (-19)
#define KDB_NOBP (-20)
#define KDB_BADADDR (-21)
+#define KDB_NOPERM (-22)
/*
* kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
/* Dynamic kdb shell command registration */
extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
- short, kdb_repeat_t);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+ short, kdb_cmdflags_t);
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_repeat_t repeat) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+ char *help, short minlen,
+ kdb_cmdflags_t flags) { return 0; }
static inline int kdb_unregister(char *cmd) { return 0; }
#endif /* CONFIG_KGDB_KDB */
enum {
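The new kdb_cmdflags_t turns command registration into a permission mask: the low bits name what a command may touch, and the same bits shifted by KDB_ENABLE_NO_ARGS_SHIFT apply only when the command is invoked without arguments, so a bare "go" can be marked always-safe while "go <address>" still requires flow-control rights. A simplified, self-contained sketch of that check; allowed() is an invented helper, not the kernel's kdb_check_flags(), and the constants mirror the enum above:

/* If called with no arguments and the command defines *_NO_ARGS bits,
 * those (shifted back down) are the permissions that must be enabled;
 * otherwise the ordinary low bits are checked. */
#include <stdbool.h>
#include <stdio.h>

#define KDB_ENABLE_NO_ARGS_SHIFT 10
#define KDB_ENABLE_FLOW_CTRL	(1 << 6)
#define KDB_ENABLE_ALWAYS_SAFE	(1 << 9)
#define KDB_ENABLE_MASK		((1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1)

static bool allowed(unsigned int cmd_flags, unsigned int enabled, bool no_args)
{
	unsigned int need = cmd_flags & KDB_ENABLE_MASK;

	if (no_args && (cmd_flags >> KDB_ENABLE_NO_ARGS_SHIFT))
		need = (cmd_flags >> KDB_ENABLE_NO_ARGS_SHIFT) & KDB_ENABLE_MASK;
	return (need & enabled) != 0;
}

int main(void)
{
	unsigned int go = KDB_ENABLE_FLOW_CTRL |
			  (KDB_ENABLE_ALWAYS_SAFE << KDB_ENABLE_NO_ARGS_SHIFT);

	printf("go (no args): %d\n", allowed(go, KDB_ENABLE_ALWAYS_SAFE, true));
	printf("go <address>: %d\n", allowed(go, KDB_ENABLE_ALWAYS_SAFE, false));
	return 0;
}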
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 575a86c7fcbd..f742b6717d52 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -50,6 +50,8 @@ enum {
STMPE_IDX_GPEDR_MSB,
STMPE_IDX_GPRER_LSB,
STMPE_IDX_GPFER_LSB,
+ STMPE_IDX_GPPUR_LSB,
+ STMPE_IDX_GPPDR_LSB,
STMPE_IDX_GPAFR_U_MSB,
STMPE_IDX_IEGPIOR_LSB,
STMPE_IDX_ISGPIOR_LSB,
@@ -113,24 +115,6 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
-struct matrix_keymap_data;
-
-/**
- * struct stmpe_keypad_platform_data - STMPE keypad platform data
- * @keymap_data: key map table and size
- * @debounce_ms: debounce interval, in ms. Maximum is
- * %STMPE_KEYPAD_MAX_DEBOUNCE.
- * @scan_count: number of key scanning cycles to confirm key data.
- * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
- * @no_autorepeat: disable key autorepeat
- */
-struct stmpe_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
- unsigned int debounce_ms;
- unsigned int scan_count;
- bool no_autorepeat;
-};
-
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
/**
@@ -199,7 +183,6 @@ struct stmpe_ts_platform_data {
* @irq_gpio: gpio number over which irq will be requested (significant only if
* irq_over_gpio is true)
* @gpio: GPIO-specific platform data
- * @keypad: keypad-specific platform data
* @ts: touchscreen-specific platform data
*/
struct stmpe_platform_data {
@@ -212,7 +195,6 @@ struct stmpe_platform_data {
int autosleep_timeout;
struct stmpe_gpio_platform_data *gpio;
- struct stmpe_keypad_platform_data *keypad;
struct stmpe_ts_platform_data *ts;
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..80fc92a49649 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
- #define expand_upwards(vma, address) do { } while (0)
+ #define expand_upwards(vma, address) (0)
#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c31f74d76ebd..679e6e90aa4c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1012,12 +1012,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * bool (*ndo_gso_check) (struct sk_buff *skb,
- * struct net_device *dev);
+ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ * struct net_device *dev,
+ * netdev_features_t features);
* Called by core transmit path to determine if device is capable of
- * performing GSO on a packet. The device returns true if it is
- * able to GSO the packet, false otherwise. If the return value is
- * false the stack will do software GSO.
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
*
* int (*ndo_switch_parent_id_get)(struct net_device *dev,
* struct netdev_phys_item_id *psid);
@@ -1178,8 +1181,9 @@ struct net_device_ops {
struct net_device *dev,
void *priv);
int (*ndo_get_lock_subclass)(struct net_device *dev);
- bool (*ndo_gso_check) (struct sk_buff *skb,
- struct net_device *dev);
+ netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
#ifdef CONFIG_NET_SWITCHDEV
int (*ndo_switch_parent_id_get)(struct net_device *dev,
struct netdev_phys_item_id *psid);
@@ -3611,8 +3615,6 @@ static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
- (dev->netdev_ops->ndo_gso_check &&
- !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9e572daa15d5..02fc86d2348e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -46,8 +46,8 @@ struct netlink_kernel_cfg {
unsigned int flags;
void (*input)(struct sk_buff *skb);
struct mutex *cb_mutex;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sk);
};
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7ea069cd3257..4b3736f7065c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -251,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
#define FGP_NOWAIT 0x00000020
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+ int fgp_flags, gfp_t cache_gfp_mask);
/**
* find_get_page - find and get a page reference
@@ -266,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
static inline struct page *find_get_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, 0, 0, 0);
+ return pagecache_get_page(mapping, offset, 0, 0);
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags)
{
- return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+ return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
/**
@@ -292,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
static inline struct page *find_lock_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
/**
@@ -319,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
{
return pagecache_get_page(mapping, offset,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
- gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+ gfp_mask);
}
/**
@@ -340,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
- mapping_gfp_mask(mapping),
- GFP_NOFS);
+ mapping_gfp_mask(mapping));
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 486e84ccb1f9..4f7a61ca4b39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,11 +79,6 @@ struct perf_branch_stack {
struct perf_branch_entry entries[0];
};
-struct perf_regs {
- __u64 abi;
- struct pt_regs *regs;
-};
-
struct task_struct;
/*
@@ -610,7 +605,14 @@ struct perf_sample_data {
u32 reserved;
} cpu_entry;
struct perf_callchain_entry *callchain;
+
+ /*
+ * regs_user may point to task_pt_regs or to regs_user_copy, depending
+ * on arch details.
+ */
struct perf_regs regs_user;
+ struct pt_regs regs_user_copy;
+
struct perf_regs regs_intr;
u64 stack_user_size;
} ____cacheline_aligned;
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 3c73d5fe18be..a5f98d53d732 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,11 +1,19 @@
#ifndef _LINUX_PERF_REGS_H
#define _LINUX_PERF_REGS_H
+struct perf_regs {
+ __u64 abi;
+ struct pt_regs *regs;
+};
+
#ifdef CONFIG_HAVE_PERF_REGS
#include <asm/perf_regs.h>
u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy);
#else
static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
{
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_NONE;
}
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
#endif /* CONFIG_HAVE_PERF_REGS */
#endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 6cd20d5e651b..a9edab2c787a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -271,6 +271,8 @@ typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
void *data);
void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec);
struct generic_pm_domain *__of_genpd_xlate_simple(
struct of_phandle_args *genpdspec,
@@ -288,6 +290,12 @@ static inline int __of_genpd_add_provider(struct device_node *np,
}
static inline void of_genpd_del_provider(struct device_node *np) {}
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ return NULL;
+}
+
#define __of_genpd_xlate_simple NULL
#define __of_genpd_xlate_onecell NULL
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c0c2bce6b0b7..d9d7e7e56352 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,6 +37,16 @@ struct anon_vma {
atomic_t refcount;
/*
+ * Count of child anon_vmas and VMAs which point to this anon_vma.
+ *
+ * This counter is used when deciding whether to reuse an anon_vma
+ * instead of forking a new one. See the comments in anon_vma_clone().
+ */
+ unsigned degree;
+
+ struct anon_vma *parent; /* Parent of this anon_vma */
+
+ /*
* NOTE: the LSB of the rb_root.rb_node is set by
* mm_take_all_locks() _after_ taking the above lock. So the
* rb_root must only be read/written after taking the above lock
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c611a02fbc51..fc52e307efab 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -38,7 +38,7 @@
#define THERMAL_CSTATE_INVALID -1UL
/* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID
+#define THERMAL_NO_LIMIT ((u32)~0)
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a219be961c0a..00048339c23e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index af10c2cf8a1d..84125088c309 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -31,6 +31,9 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @mcast_bind: a socket was bound to the given multicast group (which
+ * is given as the offset into the groups array)
+ * @mcast_unbind: a socket was unbound from the given multicast group
* @attrbuf: buffer to store parsed attributes
* @family_list: family list
* @mcgrps: multicast groups used by this family (private)
@@ -53,6 +56,8 @@ struct genl_family {
void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*mcast_bind)(struct net *net, int group);
+ void (*mcast_unbind)(struct net *net, int group);
struct nlattr ** attrbuf; /* private */
const struct genl_ops * ops; /* private */
const struct genl_multicast_group *mcgrps; /* private */
@@ -395,11 +400,11 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
}
static inline int genl_has_listeners(struct genl_family *family,
- struct sock *sk, unsigned int group)
+ struct net *net, unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
- return netlink_has_listeners(sk, group);
+ return netlink_has_listeners(net->genl_sock, group);
}
#endif /* __NET_GENERIC_NETLINK_H */
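The mcast_bind()/mcast_unbind() hooks tell a generic netlink family when a socket joins or leaves one of its multicast groups, with the owning network namespace passed in and the group given as an offset into the family's group array. A kernel-context sketch of a family using them (not buildable standalone; everything except the fields shown above is invented):

/* Hypothetical family that logs multicast membership changes; returning
 * an error from the bind hook makes the group join fail. */
static int demo_mcast_bind(struct net *net, int group)
{
	pr_info("demo: group %d bound\n", group);
	return 0;
}

static void demo_mcast_unbind(struct net *net, int group)
{
	pr_info("demo: group %d unbound\n", group);
}

static struct genl_family demo_family = {
	.name		= "demo",
	.version	= 1,
	.mcast_bind	= demo_mcast_bind,
	.mcast_unbind	= demo_mcast_unbind,
	/* .ops, .mcgrps etc. omitted */
};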
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 58d719ddaa60..29c7be8808d5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
- * particular key. Setting this flag does not necessarily mean that SKBs
- * will have sufficient tailroom for ICV or MIC.
+ * particular key.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
- * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
- * not necessarily mean that SKBs will have sufficient tailroom for ICV or
- * MIC.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index eb070b3674a1..76f708486aae 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -190,7 +190,6 @@ struct neigh_hash_table {
struct neigh_table {
- struct neigh_table *next;
int family;
int entry_size;
int key_len;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 57cccd0052e5..903461aa5644 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -1,6 +1,9 @@
#ifndef __NET_VXLAN_H
#define __NET_VXLAN_H 1
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
@@ -51,16 +54,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
-static inline bool vxlan_gso_check(struct sk_buff *skb)
+static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+ netdev_features_t features)
{
- if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
+ u8 l4_hdr = 0;
+
+ if (!skb->encapsulation)
+ return features;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if ((l4_hdr == IPPROTO_UDP) &&
(skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
(skb_inner_mac_header(skb) - skb_transport_header(skb) !=
sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
- return false;
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
- return true;
+ return features;
}
/* IP header + UDP + VXLAN + Ethernet header */
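Taken together, ndo_features_check() and vxlan_features_check() replace the boolean ndo_gso_check(): the stack hands the driver the feature set it computed for the skb and the driver returns the subset it can actually honour, stripping checksum/GSO bits for encapsulations its hardware cannot parse. A condensed kernel-context sketch of how a NIC driver might wire the helper in (the nic_* names are invented):

/* The callback signature matches the ndo_features_check prototype above;
 * vxlan_features_check() clears NETIF_F_ALL_CSUM/NETIF_F_GSO_MASK for
 * encapsulated packets that are not plain VXLAN-over-UDP. */
static netdev_features_t nic_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vxlan_features_check(skb, features);
}

static const struct net_device_ops nic_netdev_ops = {
	.ndo_features_check	= nic_features_check,
	/* .ndo_start_xmit and friends omitted */
};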
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 1e7f74acc2ec..b429b73e875e 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -857,7 +857,7 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the sample rate from the hw params
+ * params_rate - Get the sample rate from the hw params
* @p: hw params
*/
static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
@@ -866,7 +866,7 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the period size (in frames) from the hw params
+ * params_period_size - Get the period size (in frames) from the hw params
* @p: hw params
*/
static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
@@ -875,7 +875,7 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the number of periods from the hw params
+ * params_periods - Get the number of periods from the hw params
* @p: hw params
*/
static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
@@ -884,7 +884,7 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the buffer size (in frames) from the hw params
+ * params_buffer_size - Get the buffer size (in frames) from the hw params
* @p: hw params
*/
static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
@@ -893,7 +893,7 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the buffer size (in bytes) from the hw params
+ * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
* @p: hw params
*/
static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 7543b3e51331..e063effe0cc1 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -5,7 +5,7 @@
/*
* FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
+ * FMODE_NONOTIFY is 0x4000000
* These cannot be used by userspace O_* until internal and external open
* flags are split.
* -Eric Paris
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 74a2a1773494..79b12b004ade 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
/*
* IPV6 socket options
*/
-
+#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADDRFORM 1
#define IPV6_2292PKTINFO 2
#define IPV6_2292HOPOPTS 3
@@ -196,6 +196,7 @@ struct in6_flowlabel_req {
#define IPV6_IPSEC_POLICY 34
#define IPV6_XFRM_POLICY 35
+#endif
/*
* Multicast:
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 7acef41fc209..af94f31e33ac 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
uint32_t pad;
};
-#define KFD_IOC_MAGIC 'K'
+#define AMDKFD_IOCTL_BASE 'K'
+#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
+#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
-#define KFD_IOC_GET_VERSION \
- _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+#define AMDKFD_IOC_GET_VERSION \
+ AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
-#define KFD_IOC_CREATE_QUEUE \
- _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+#define AMDKFD_IOC_CREATE_QUEUE \
+ AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
-#define KFD_IOC_DESTROY_QUEUE \
- _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+#define AMDKFD_IOC_DESTROY_QUEUE \
+ AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
-#define KFD_IOC_SET_MEMORY_POLICY \
- _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+#define AMDKFD_IOC_SET_MEMORY_POLICY \
+ AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
-#define KFD_IOC_GET_CLOCK_COUNTERS \
- _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
+ AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
-#define KFD_IOC_GET_PROCESS_APERTURES \
- _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES \
+ AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
-#define KFD_IOC_UPDATE_QUEUE \
- _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_UPDATE_QUEUE \
+ AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+
+#define AMDKFD_COMMAND_START 0x01
+#define AMDKFD_COMMAND_END 0x08
#endif
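The amdkfd ioctls move to AMDKFD_IOC_* names with explicit _IOR/_IOW/_IOWR encodings and a contiguous 0x01..0x07 command range. From user space the call pattern is unchanged; a minimal sketch, assuming the updated <linux/kfd_ioctl.h> is installed and a /dev/kfd node exists:

/* Query the KFD interface version through the renamed ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0 || ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) < 0) {
		perror("kfd");
		return 1;
	}
	printf("KFD interface %u.%u\n", args.major_version, args.minor_version);
	close(fd);
	return 0;
}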
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index c140620dad92..e28807ad17fa 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -69,6 +69,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 0
#define __UAPI_DEF_IPV6_MREQ 0
#define __UAPI_DEF_IPPROTO_V6 0
+#define __UAPI_DEF_IPV6_OPTIONS 0
#else
@@ -82,6 +83,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
#endif /* _NETINET_IN_H */
@@ -103,6 +105,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
/* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 61c818a7fe70..a3318f31e8e7 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -101,6 +101,13 @@ struct vring {
struct vring_used *used;
};
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
*
diff --git a/kernel/audit.c b/kernel/audit.c
index f8f203e8018c..72ab759a0b43 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -429,7 +429,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
* This function doesn't consume an skb as might be expected since it has to
* copy it anyways.
*/
-static void kauditd_send_multicast_skb(struct sk_buff *skb)
+static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *copy;
struct audit_net *aunet = net_generic(&init_net, audit_net_id);
@@ -448,11 +448,11 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
* no reason for new multicast clients to continue with this
* non-compliance.
*/
- copy = skb_copy(skb, GFP_KERNEL);
+ copy = skb_copy(skb, gfp_mask);
if (!copy)
return;
- nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
+ nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
}
/*
@@ -1100,7 +1100,7 @@ static void audit_receive(struct sk_buff *skb)
}
/* Run custom bind function on netlink socket group connect or bind requests. */
-static int audit_bind(int group)
+static int audit_bind(struct net *net, int group)
{
if (!capable(CAP_AUDIT_READ))
return -EPERM;
@@ -1940,7 +1940,7 @@ void audit_log_end(struct audit_buffer *ab)
struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
nlh->nlmsg_len = ab->skb->len;
- kauditd_send_multicast_skb(ab->skb);
+ kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
/*
* The original kaudit unicast socket sends up messages with
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 3598e13f2a65..4f68a326d92e 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -442,19 +442,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
f->type = AUDIT_LOGINUID_SET;
f->val = 0;
- }
-
- if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
- struct pid *pid;
- rcu_read_lock();
- pid = find_vpid(f->val);
- if (!pid) {
- rcu_read_unlock();
- err = -ESRCH;
- goto exit_free;
- }
- f->val = pid_nr(pid);
- rcu_read_unlock();
+ entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
}
err = audit_field_valid(entry, f);
@@ -630,6 +618,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
data->buflen += data->values[i] =
audit_pack_string(&bufp, krule->filterkey);
break;
+ case AUDIT_LOGINUID_SET:
+ if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
+ data->fields[i] = AUDIT_LOGINUID;
+ data->values[i] = AUDIT_UID_UNSET;
+ break;
+ }
+ /* fallthrough if set */
default:
data->values[i] = f->val;
}
@@ -646,6 +641,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
int i;
if (a->flags != b->flags ||
+ a->pflags != b->pflags ||
a->listnr != b->listnr ||
a->action != b->action ||
a->field_count != b->field_count)
@@ -764,6 +760,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
new = &entry->rule;
new->vers_ops = old->vers_ops;
new->flags = old->flags;
+ new->pflags = old->pflags;
new->listnr = old->listnr;
new->action = old->action;
for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c75522a83678..072566dd0caf 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -72,6 +72,8 @@
#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/ctype.h>
+#include <linux/string.h>
+#include <uapi/linux/limits.h>
#include "audit.h"
@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
}
list_for_each_entry_reverse(n, &context->names_list, list) {
- /* does the name pointer match? */
- if (!n->name || n->name->name != name->name)
+ if (!n->name || strcmp(n->name->name, name->name))
continue;
/* match the correct record type */
@@ -1877,12 +1878,48 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
}
out_alloc:
- /* unable to find the name from a previous getname(). Allocate a new
- * anonymous entry.
- */
- n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
+ /* unable to find an entry with both a matching name and type */
+ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
if (!n)
return;
+ /* unfortunately, while we may have a path name to record with the
+ * inode, we can't always rely on the string lasting until the end of
+ * the syscall, so we need to create our own copy; it may fail due to
+ * memory allocation issues, but we do our best */
+ if (name) {
+ /* we can't use getname_kernel() due to size limits */
+ size_t len = strlen(name->name) + 1;
+ struct filename *new = __getname();
+
+ if (unlikely(!new))
+ goto out;
+
+ if (len <= (PATH_MAX - sizeof(*new))) {
+ new->name = (char *)(new) + sizeof(*new);
+ new->separate = false;
+ } else if (len <= PATH_MAX) {
+ /* this looks odd, but is due to final_putname() */
+ struct filename *new2;
+
+ new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
+ if (unlikely(!new2)) {
+ __putname(new);
+ goto out;
+ }
+ new2->name = (char *)new;
+ new2->separate = true;
+ new = new2;
+ } else {
+ /* we should never get here, but let's be safe */
+ __putname(new);
+ goto out;
+ }
+ strlcpy((char *)new->name, name->name, len);
+ new->uptr = NULL;
+ new->aname = n;
+ n->name = new;
+ n->name_put = true;
+ }
out:
if (parent) {
n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
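A rough user-space sketch of the two name-copy layouts used in the hunk above: short names share a single PATH_MAX-sized buffer with the header, long names keep the header in a separate allocation. malloc() stands in for __getname()/kmalloc() and the struct is simplified, so this is an illustration of the layout choice rather than the kernel code itself.

#include <stdlib.h>
#include <string.h>

#define PATH_MAX 4096	/* mirrors uapi/linux/limits.h */

struct filename_copy {
	const char *name;
	int separate;	/* header stored outside the name buffer? */
};

static struct filename_copy *copy_name(const char *src)
{
	size_t len = strlen(src) + 1;
	char *buf = malloc(PATH_MAX);	/* stands in for __getname() */
	struct filename_copy *new;

	if (!buf || len > PATH_MAX) {
		free(buf);
		return NULL;
	}
	if (len <= PATH_MAX - sizeof(*new)) {
		/* short name: header and string live in the same buffer */
		new = (struct filename_copy *)buf;
		new->name = buf + sizeof(*new);
		new->separate = 0;
	} else {
		/* long name: the string needs the whole buffer */
		new = malloc(sizeof(*new));
		if (!new) {
			free(buf);
			return NULL;
		}
		new->name = buf;
		new->separate = 1;
	}
	memcpy((char *)new->name, src, len);
	return new;
}

int main(void)
{
	return copy_name("/etc/passwd") ? 0 : 1;
}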
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1adf62b39b96..07ce18ca71e0 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -27,6 +27,9 @@
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
+
+#define pr_fmt(fmt) "KGDB: " fmt
+
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
return err;
err = kgdb_arch_remove_breakpoint(&tmp);
if (err)
- printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
- "memory destroyed at: %lx", addr);
+ pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
+ addr);
return err;
}
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
if (error) {
ret = error;
- printk(KERN_INFO "KGDB: BP install failed: %lx",
- kgdb_break[i].bpt_addr);
+ pr_info("BP install failed: %lx\n",
+ kgdb_break[i].bpt_addr);
continue;
}
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
continue;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error) {
- printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
- kgdb_break[i].bpt_addr);
+ pr_info("BP remove failed: %lx\n",
+ kgdb_break[i].bpt_addr);
ret = error;
}
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
goto setundefined;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error)
- printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+ pr_err("breakpoint remove failed: %lx\n",
kgdb_break[i].bpt_addr);
setundefined:
kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
if (print_wait) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
- printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+ pr_crit("waiting... or $3#33 for KDB\n");
#else
- printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+ pr_crit("Waiting for remote debugger\n");
#endif
}
return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
exception_level = 0;
kgdb_skipexception(ks->ex_vector, ks->linux_regs);
dbg_activate_sw_breakpoints();
- printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
- addr);
+ pr_crit("re-enter error: breakpoint removed %lx\n", addr);
WARN_ON_ONCE(1);
return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
panic("Recursive entry to debugger");
}
- printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+ pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
/* Allow kdb to debug itself one level */
return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
int cpu;
int trace_on = 0;
int online_cpus = num_online_cpus();
+ u64 time_left;
kgdb_info[ks->cpu].enter_kgdb++;
kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
/*
* Wait for the other CPUs to be notified and be waiting for us:
*/
- while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
- atomic_read(&slaves_in_kgdb)) != online_cpus)
+ time_left = loops_per_jiffy * HZ;
+ while (kgdb_do_roundup && --time_left &&
+ (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+ online_cpus)
cpu_relax();
+ if (!time_left)
+ pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
/*
* At this point the primary processor is completely
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
static void sysrq_handle_dbg(int key)
{
if (!dbg_io_ops) {
- printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+ pr_crit("ERROR: No KGDB I/O module available\n");
return;
}
if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
- printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+ pr_crit("KGDB or $3#33 for KDB\n");
#else
- printk(KERN_CRIT "Entering KGDB\n");
+ pr_crit("Entering KGDB\n");
#endif
}
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
{
kgdb_break_asap = 0;
- printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+ pr_crit("Waiting for connection from remote gdb...\n");
kgdb_breakpoint();
}
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
if (dbg_io_ops) {
spin_unlock(&kgdb_registration_lock);
- printk(KERN_ERR "kgdb: Another I/O driver is already "
- "registered with KGDB.\n");
+ pr_err("Another I/O driver is already registered with KGDB\n");
return -EBUSY;
}
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
- printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
- new_dbg_io_ops->name);
+ pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
/* Arm KGDB now. */
kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
- printk(KERN_INFO
- "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+ pr_info("Unregistered I/O driver %s, debugger disabled\n",
old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index b20d544f20c2..e1dbf4a2c69e 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
bp->bp_free = 1;
- kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
- "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
- "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
+ "Set/Display breakpoints", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
+ "Display breakpoints", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
- kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
- "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("bc", kdb_bc, "<bpnum>",
- "Clear Breakpoint", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("be", kdb_bc, "<bpnum>",
- "Enable Breakpoint", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bd", kdb_bc, "<bpnum>",
- "Disable Breakpoint", 0, KDB_REPEAT_NONE);
-
- kdb_register_repeat("ss", kdb_ss, "",
- "Single Step", 1, KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
+ "[datar [length]|dataw [length]] Set hw brk", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bc", kdb_bc, "<bpnum>",
+ "Clear Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+ kdb_register_flags("be", kdb_bc, "<bpnum>",
+ "Enable Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+ kdb_register_flags("bd", kdb_bc, "<bpnum>",
+ "Disable Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+
+ kdb_register_flags("ss", kdb_ss, "",
+ "Single Step", 1,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
/*
* Architecture dependent initialization.
*/
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8859ca34dcfe..15e1a7af5dd0 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
ks->pass_exception = 1;
KDB_FLAG_SET(CATASTROPHIC);
}
+ /* set CATASTROPHIC if the system contains unresponsive processors */
+ for_each_online_cpu(i)
+ if (!kgdb_info[i].enter_kgdb)
+ KDB_FLAG_SET(CATASTROPHIC);
if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
KDB_STATE_CLEAR(SSBPT);
KDB_STATE_CLEAR(DOING_SS);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 379650b984f8..f191bddf64b8 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -12,6 +12,7 @@
*/
#include <linux/ctype.h>
+#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
@@ -42,6 +44,12 @@
#include <linux/slab.h>
#include "kdb_private.h"
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kdb."
+
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
+
#define GREP_LEN 256
char kdb_grep_string[GREP_LEN];
int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
KDBMSG(BADLENGTH, "Invalid length field"),
KDBMSG(NOBP, "No Breakpoint exists"),
KDBMSG(BADADDR, "Invalid address"),
+ KDBMSG(NOPERM, "Permission denied"),
};
#undef KDBMSG
@@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu)
}
/*
+ * Check whether the flags of the current command and the permissions
+ * of the kdb console allow a command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+ bool no_args)
+{
+ /* permissions comes from userspace so needs massaging slightly */
+ permissions &= KDB_ENABLE_MASK;
+ permissions |= KDB_ENABLE_ALWAYS_SAFE;
+
+ /* some commands change group when launched with no arguments */
+ if (no_args)
+ permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+
+ flags |= KDB_ENABLE_ALL;
+
+ return permissions & flags;
+}
+
+/*
* kdbgetenv - This function will return the character string value of
* an environment variable.
* Parameters:
@@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
kdb_symtab_t symtab;
/*
+ * If the enable flags prohibit both arbitrary memory access
+ * and flow control then there are no reasonable grounds to
+ * provide symbol lookup.
+ */
+ if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+ kdb_cmd_enabled, false))
+ return KDB_NOPERM;
+
+ /*
* Process arguments which follow the following syntax:
*
* symbol | numeric-address [+/- numeric-offset]
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
if (!s->count)
s->usable = 0;
if (s->usable)
- kdb_register(s->name, kdb_exec_defcmd,
- s->usage, s->help, 0);
+ /* macros are always safe because when executed each
+ * internal command re-enters kdb_parse() and is
+ * safety checked individually.
+ */
+ kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+ s->help, 0,
+ KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
if (i < kdb_max_commands) {
int result;
+
+ if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+ return KDB_NOPERM;
+
KDB_STATE_SET(CMD);
result = (*tp->cmd_func)(argc-1, (const char **)argv);
if (result && ignore_errors && result > KDB_CMD_GO)
result = 0;
KDB_STATE_CLEAR(CMD);
- switch (tp->cmd_repeat) {
- case KDB_REPEAT_NONE:
- argc = 0;
- if (argv[0])
- *(argv[0]) = '\0';
- break;
- case KDB_REPEAT_NO_ARGS:
- argc = 1;
- if (argv[1])
- *(argv[1]) = '\0';
- break;
- case KDB_REPEAT_WITH_ARGS:
- break;
- }
+
+ if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+ return result;
+
+ argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+ if (argv[argc])
+ *(argv[argc]) = '\0';
return result;
}
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
*/
static int kdb_sr(int argc, const char **argv)
{
+ bool check_mask =
+ !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
+
if (argc != 1)
return KDB_ARGCOUNT;
+
kdb_trap_printk++;
- __handle_sysrq(*argv[1], false);
+ __handle_sysrq(*argv[1], check_mask);
kdb_trap_printk--;
return 0;
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i)) {
state = 'F'; /* cpu is offline */
+ } else if (!kgdb_info[i].enter_kgdb) {
+ state = 'D'; /* cpu is online but unresponsive */
} else {
state = ' '; /* cpu is responding to kdb */
if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
/*
* Validate cpunum
*/
- if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+ if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
return KDB_BADCPUNUM;
dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
return 0;
if (!kt->cmd_name)
continue;
+ if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+ continue;
if (strlen(kt->cmd_usage) > 20)
space = "\n ";
kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
}
/*
- * kdb_register_repeat - This function is used to register a kernel
+ * kdb_register_flags - This function is used to register a kernel
* debugger command.
* Inputs:
* cmd Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
* zero for success, one if a duplicate command.
*/
#define kdb_command_extend 50 /* arbitrary */
-int kdb_register_repeat(char *cmd,
- kdb_func_t func,
- char *usage,
- char *help,
- short minlen,
- kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+ kdb_func_t func,
+ char *usage,
+ char *help,
+ short minlen,
+ kdb_cmdflags_t flags)
{
int i;
kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
kp->cmd_func = func;
kp->cmd_usage = usage;
kp->cmd_help = help;
- kp->cmd_flags = 0;
kp->cmd_minlen = minlen;
- kp->cmd_repeat = repeat;
+ kp->cmd_flags = flags;
return 0;
}
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
/*
* kdb_register - Compatibility register function for commands that do
* not need to specify a repeat state. Equivalent to
- * kdb_register_repeat with KDB_REPEAT_NONE.
+ * kdb_register_flags with flags set to 0.
* Inputs:
* cmd Command name
* func Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
char *help,
short minlen)
{
- return kdb_register_repeat(cmd, func, usage, help, minlen,
- KDB_REPEAT_NONE);
+ return kdb_register_flags(cmd, func, usage, help, minlen, 0);
}
EXPORT_SYMBOL_GPL(kdb_register);
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
for_each_kdbcmd(kp, i)
kp->cmd_name = NULL;
- kdb_register_repeat("md", kdb_md, "<vaddr>",
+ kdb_register_flags("md", kdb_md, "<vaddr>",
"Display Memory Contents, also mdWcN, e.g. md8c1", 1,
- KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
- "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
- "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mds", kdb_md, "<vaddr>",
- "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
- "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("go", kdb_go, "[<vaddr>]",
- "Continue Execution", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("rd", kdb_rd, "",
- "Display Registers", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
- "Modify Registers", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("ef", kdb_ef, "<vaddr>",
- "Display exception frame", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
- "Stack traceback", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("btp", kdb_bt, "<pid>",
- "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
- "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("btc", kdb_bt, "",
- "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+ "Display Raw Memory", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+ "Display Physical Memory", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mds", kdb_md, "<vaddr>",
+ "Display Memory Symbolically", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+ "Modify Memory Contents", 0,
+ KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("go", kdb_go, "[<vaddr>]",
+ "Continue Execution", 1,
+ KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+ kdb_register_flags("rd", kdb_rd, "",
+ "Display Registers", 0,
+ KDB_ENABLE_REG_READ);
+ kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+ "Modify Registers", 0,
+ KDB_ENABLE_REG_WRITE);
+ kdb_register_flags("ef", kdb_ef, "<vaddr>",
+ "Display exception frame", 0,
+ KDB_ENABLE_MEM_READ);
+ kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+ "Stack traceback", 1,
+ KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+ kdb_register_flags("btp", kdb_bt, "<pid>",
+ "Display stack for process <pid>", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+ "Backtrace all processes matching state flag", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("btc", kdb_bt, "",
+ "Backtrace current process on each cpu", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
- KDB_REPEAT_NONE);
- kdb_register_repeat("env", kdb_env, "",
- "Show environment variables", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("set", kdb_set, "",
- "Set environment variables", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("help", kdb_help, "",
- "Display Help Message", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("?", kdb_help, "",
- "Display Help Message", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
- "Switch to new cpu", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("kgdb", kdb_kgdb, "",
- "Enter kgdb mode", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
- "Display active task list", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("pid", kdb_pid, "<pidnum>",
- "Switch to another task", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("reboot", kdb_reboot, "",
- "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+ KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+ kdb_register_flags("env", kdb_env, "",
+ "Show environment variables", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("set", kdb_set, "",
+ "Set environment variables", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("help", kdb_help, "",
+ "Display Help Message", 1,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("?", kdb_help, "",
+ "Display Help Message", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+ "Switch to new cpu", 0,
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+ kdb_register_flags("kgdb", kdb_kgdb, "",
+ "Enter kgdb mode", 0, 0);
+ kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+ "Display active task list", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("pid", kdb_pid, "<pidnum>",
+ "Switch to another task", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("reboot", kdb_reboot, "",
+ "Reboot the machine immediately", 0,
+ KDB_ENABLE_REBOOT);
#if defined(CONFIG_MODULES)
- kdb_register_repeat("lsmod", kdb_lsmod, "",
- "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("lsmod", kdb_lsmod, "",
+ "List loaded kernel modules", 0,
+ KDB_ENABLE_INSPECT);
#endif
#if defined(CONFIG_MAGIC_SYSRQ)
- kdb_register_repeat("sr", kdb_sr, "<key>",
- "Magic SysRq key", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("sr", kdb_sr, "<key>",
+ "Magic SysRq key", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
#endif
#if defined(CONFIG_PRINTK)
- kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
- "Display syslog buffer", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+ "Display syslog buffer", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
#endif
if (arch_kgdb_ops.enable_nmi) {
- kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
- "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
- }
- kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
- "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
- "Send a signal to a process", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("summary", kdb_summary, "",
- "Summarize the system", 4, KDB_REPEAT_NONE);
- kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
- "Display per_cpu variables", 3, KDB_REPEAT_NONE);
- kdb_register_repeat("grephelp", kdb_grep_help, "",
- "Display help on | grep", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+ "Disable NMI entry to KDB", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ }
+ kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+ "Define a set of commands, down to endefcmd", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+ "Send a signal to a process", 0,
+ KDB_ENABLE_SIGNAL);
+ kdb_register_flags("summary", kdb_summary, "",
+ "Summarize the system", 4,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+ "Display per_cpu variables", 3,
+ KDB_ENABLE_MEM_READ);
+ kdb_register_flags("grephelp", kdb_grep_help, "",
+ "Display help on | grep", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
}
/* Execute any commands defined in kdb_cmds. */
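For readers tracing the new command gating, a standalone sketch of the permission check added in this file. The bit values and the NO_ARGS shift below are assumptions made for the example (the authoritative definitions live in include/linux/kdb.h); only the shape of the logic is taken from the patch.

#include <stdbool.h>
#include <stdio.h>

#define KDB_ENABLE_ALL			0x0001	/* illustrative values */
#define KDB_ENABLE_MEM_READ		0x0002
#define KDB_ENABLE_FLOW_CTRL		0x0040
#define KDB_ENABLE_ALWAYS_SAFE		0x0200
#define KDB_ENABLE_MASK			0x03ff
#define KDB_ENABLE_NO_ARGS_SHIFT	10

static bool check_flags(unsigned int flags, unsigned int permissions,
			bool no_args)
{
	permissions &= KDB_ENABLE_MASK;
	permissions |= KDB_ENABLE_ALWAYS_SAFE;
	if (no_args)
		permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
	flags |= KDB_ENABLE_ALL;
	return permissions & flags;
}

int main(void)
{
	/* Only memory reads enabled: a flow-control command is refused (0)... */
	printf("%d\n", check_flags(KDB_ENABLE_FLOW_CTRL, KDB_ENABLE_MEM_READ, false));
	/* ...but an always-safe command such as "help" still runs (1). */
	printf("%d\n", check_flags(KDB_ENABLE_ALWAYS_SAFE, KDB_ENABLE_MEM_READ, false));
	return 0;
}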
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 7afd3c8c41d5..eaacd1693954 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
kdb_func_t cmd_func; /* Function to execute command */
char *cmd_usage; /* Usage String for this command */
char *cmd_help; /* Help message for this command */
- short cmd_flags; /* Parsing flags */
short cmd_minlen; /* Minimum legal # command
* chars required */
- kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */
+ kdb_cmdflags_t cmd_flags; /* Command behaviour flags */
} kdbtab_t;
extern int kdb_bt(int, const char **); /* KDB display back trace */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c1ee7f2bebc..882f835a0d85 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
}
static void perf_sample_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs)
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
{
- if (!user_mode(regs)) {
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- regs_user->abi = perf_reg_abi(current);
+ if (user_mode(regs)) {
+ regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
+ } else if (current->mm) {
+ perf_get_regs_user(regs_user, regs, regs_user_copy);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
}
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
- perf_sample_regs_user(&data->regs_user, regs);
+ perf_sample_regs_user(&data->regs_user, regs,
+ &data->regs_user_copy);
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
diff --git a/kernel/exit.c b/kernel/exit.c
index 1ea4369890a3..6806c55475ee 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
static int wait_consider_task(struct wait_opts *wo, int ptrace,
struct task_struct *p)
{
+ /*
+ * We can race with wait_task_zombie() from another thread.
+ * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+ * can't confuse the checks below.
+ */
+ int exit_state = ACCESS_ONCE(p->exit_state);
int ret;
- if (unlikely(p->exit_state == EXIT_DEAD))
+ if (unlikely(exit_state == EXIT_DEAD))
return 0;
ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
return 0;
}
- if (unlikely(p->exit_state == EXIT_TRACE)) {
+ if (unlikely(exit_state == EXIT_TRACE)) {
/*
* ptrace == 0 means we are the natural parent. In this case
* we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
}
/* slay zombie? */
- if (p->exit_state == EXIT_ZOMBIE) {
+ if (exit_state == EXIT_ZOMBIE) {
/* we don't reap group leaders with subthreads */
if (!delay_group_leader(p)) {
/*
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 5cf6731b98e9..3ef3736002d8 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
DEBUG_LOCKS_WARN_ON(lock->owner != current);
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
- mutex_clear_owner(lock);
}
/*
* __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
* mutexes so that we can do it here after we've verified state.
*/
+ mutex_clear_owner(lock);
atomic_set(&lock->count, 1);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b78add6..c0accc00566e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- alloc_size += num_possible_cpus() * cpumask_size();
-#endif
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
+ }
#ifdef CONFIG_CPUMASK_OFFSTACK
- for_each_possible_cpu(i) {
- per_cpu(load_balance_mask, i) = (void *)ptr;
- ptr += cpumask_size();
- }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+ for_each_possible_cpu(i) {
+ per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+ cpumask_size(), GFP_KERNEL, cpu_to_node(i));
}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..b52092f2636d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
- int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
- int rorun = dl_se->runtime <= 0;
-
- if (!rorun && !dmiss)
- return 0;
-
- /*
- * If we are beyond our current deadline and we are still
- * executing, then we have already used some of the runtime of
- * the next instance. Thus, if we do not account that, we are
- * stealing bandwidth from the system at each deadline miss!
- */
- if (dmiss) {
- dl_se->runtime = rorun ? dl_se->runtime : 0;
- dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
- }
-
- return 1;
+ return (dl_se->runtime <= 0);
}
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
*/
- if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
- replenish_dl_entity(dl_se, pi_se);
- else
+ if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
update_dl_entity(dl_se, pi_se);
+ else if (flags & ENQUEUE_REPLENISH)
+ replenish_dl_entity(dl_se, pi_se);
__enqueue_dl_entity(dl_se);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..40667cbf371b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
+ /* init_cfs_bandwidth() was not called */
+ if (!cfs_b->throttled_cfs_rq.next)
+ return;
+
hrtimer_cancel(&cfs_b->period_timer);
hrtimer_cancel(&cfs_b->slack_timer);
}
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
* wl = S * s'_i; see (2)
*/
if (W > 0 && w < W)
- wl = (w * tg->shares) / W;
+ wl = (w * (long)tg->shares) / W;
else
wl = tg->shares;
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index b0b1c44e923a..3ccf5c2c1320 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
static __init int kdb_ftrace_register(void)
{
- kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
- "Dump ftrace log", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+ "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 358eb81fa28d..c635a107a7de 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -73,6 +73,31 @@ config KGDB_KDB
help
KDB frontend for kernel
+config KDB_DEFAULT_ENABLE
+ hex "KDB: Select kdb command functions to be enabled by default"
+ depends on KGDB_KDB
+ default 0x1
+ help
+ Specifies which kdb commands are enabled by default. This may
+ be set to 1 or 0 to enable all commands or disable almost all
+ commands.
+
+ Alternatively the following bitmask applies:
+
+ 0x0002 - allow arbitrary reads from memory and symbol lookup
+ 0x0004 - allow arbitrary writes to memory
+ 0x0008 - allow current register state to be inspected
+ 0x0010 - allow current register state to be modified
+ 0x0020 - allow passive inspection (backtrace, process list, lsmod)
+ 0x0040 - allow flow control management (breakpoint, single step)
+ 0x0080 - enable signalling of processes
+ 0x0100 - allow machine to be rebooted
+
+ The config option merely sets the default at boot time. Either
+ issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+ setting the kdb.cmd_enable=X kernel command line option will
+ override the default settings.
+
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
depends on VT && KGDB_KDB
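To show how the bits documented above combine, a tiny sketch that composes a cmd_enable value for read-only debugging with breakpoints. The macro names are invented for the example; only the numeric bit values come from the help text.

#include <stdio.h>

#define KDB_CMD_MEM_READ	0x0002	/* from the help text above */
#define KDB_CMD_INSPECT		0x0020
#define KDB_CMD_FLOW_CTRL	0x0040

int main(void)
{
	/* Prints 0x62: the value to echo into
	 * /sys/module/kdb/parameters/cmd_enable or pass as kdb.cmd_enable=.
	 */
	printf("0x%x\n", KDB_CMD_MEM_READ | KDB_CMD_INSPECT | KDB_CMD_FLOW_CTRL);
	return 0;
}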
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 2404d03e251a..03dd576e6773 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -11,6 +11,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
//#define DEBUG
+#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/assoc_array_priv.h>
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 56badfc4810a..957d3da53ddd 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC
depends on !KMEMCHECK
select PAGE_EXTENSION
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
- select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
@@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC
that would result in incorrect warnings of memory corruption after
a resume because free pages are not saved to the suspend image.
-config WANT_PAGE_DEBUG_FLAGS
- bool
-
config PAGE_POISONING
bool
- select WANT_PAGE_DEBUG_FLAGS
-
-config PAGE_GUARD
- bool
- select WANT_PAGE_DEBUG_FLAGS
diff --git a/mm/filemap.c b/mm/filemap.c
index bd8543c6508f..673e4581a2e5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1046,8 +1046,7 @@ EXPORT_SYMBOL(find_lock_entry);
* @mapping: the address_space to search
* @offset: the page index
* @fgp_flags: PCG flags
- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
*
* Looks up the page cache slot at @mapping & @offset.
*
@@ -1056,11 +1055,9 @@ EXPORT_SYMBOL(find_lock_entry);
* FGP_ACCESSED: the page will be marked accessed
* FGP_LOCK: Page is return locked
* FGP_CREAT: If page is not present then a new page is allocated using
- * @cache_gfp_mask and added to the page cache and the VM's LRU
- * list. If radix tree nodes are allocated during page cache
- * insertion then @radix_gfp_mask is used. The page is returned
- * locked and with an increased refcount. Otherwise, %NULL is
- * returned.
+ * @gfp_mask and added to the page cache and the VM's LRU
+ * list. The page is returned locked and with an increased
+ * refcount. Otherwise, %NULL is returned.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
@@ -1068,7 +1065,7 @@ EXPORT_SYMBOL(find_lock_entry);
* If there is a page cache page, it is returned with an increased refcount.
*/
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+ int fgp_flags, gfp_t gfp_mask)
{
struct page *page;
@@ -1105,13 +1102,11 @@ no_page:
if (!page && (fgp_flags & FGP_CREAT)) {
int err;
if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
- cache_gfp_mask |= __GFP_WRITE;
- if (fgp_flags & FGP_NOFS) {
- cache_gfp_mask &= ~__GFP_FS;
- radix_gfp_mask &= ~__GFP_FS;
- }
+ gfp_mask |= __GFP_WRITE;
+ if (fgp_flags & FGP_NOFS)
+ gfp_mask &= ~__GFP_FS;
- page = __page_cache_alloc(cache_gfp_mask);
+ page = __page_cache_alloc(gfp_mask);
if (!page)
return NULL;
@@ -1122,7 +1117,8 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
- err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+ err = add_to_page_cache_lru(page, mapping, offset,
+ gfp_mask & GFP_RECLAIM_MASK);
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
@@ -2443,8 +2439,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
fgp_flags |= FGP_NOFS;
page = pagecache_get_page(mapping, index, fgp_flags,
- mapping_gfp_mask(mapping),
- GFP_KERNEL);
+ mapping_gfp_mask(mapping));
if (page)
wait_for_stable_page(page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ef91e856c7e4..851924fa5170 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
mem_cgroup_swap_statistics(from, false);
mem_cgroup_swap_statistics(to, true);
- /*
- * This function is only called from task migration context now.
- * It postpones page_counter and refcount handling till the end
- * of task migration(mem_cgroup_clear_mc()) for performance
- * improvement. But we cannot postpone css_get(to) because if
- * the process that has been moved to @to does swap-in, the
- * refcount of @to might be decreased to 0.
- *
- * We are in attach() phase, so the cgroup is guaranteed to be
- * alive, so we can just call css_get().
- */
- css_get(&to->css);
return 0;
}
return -EINVAL;
@@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (parent_css == NULL) {
root_mem_cgroup = memcg;
page_counter_init(&memcg->memory, NULL);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
}
@@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
if (parent->use_hierarchy) {
page_counter_init(&memcg->memory, &parent->memory);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, &parent->memsw);
page_counter_init(&memcg->kmem, &parent->kmem);
@@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
*/
} else {
page_counter_init(&memcg->memory, NULL);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
/*
@@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
- memcg->soft_limit = 0;
+ memcg->soft_limit = PAGE_COUNTER_MAX;
}
#ifdef CONFIG_MMU
diff --git a/mm/memory.c b/mm/memory.c
index 649e7d440bd7..c6565f00fb38 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2137,17 +2137,24 @@ reuse:
if (!dirty_page)
return ret;
- /*
- * Yes, Virginia, this is actually required to prevent a race
- * with clear_page_dirty_for_io() from clearing the page dirty
- * bit after it clear all dirty ptes, but before a racing
- * do_wp_page installs a dirty pte.
- *
- * do_shared_fault is protected similarly.
- */
if (!page_mkwrite) {
- wait_on_page_locked(dirty_page);
- set_page_dirty_balance(dirty_page);
+ struct address_space *mapping;
+ int dirtied;
+
+ lock_page(dirty_page);
+ dirtied = set_page_dirty(dirty_page);
+ VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+ mapping = dirty_page->mapping;
+ unlock_page(dirty_page);
+
+ if (dirtied && mapping) {
+ /*
+ * Some device drivers do not set page.mapping
+ * but still dirty their pages
+ */
+ balance_dirty_pages_ratelimited(mapping);
+ }
+
/* file_update_time outside page_lock */
if (vma->vm_file)
file_update_time(vma->vm_file);
@@ -2378,12 +2385,12 @@ void unmap_mapping_range(struct address_space *mapping,
details.last_index = ULONG_MAX;
- i_mmap_lock_read(mapping);
+ i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
- i_mmap_unlock_read(mapping);
+ i_mmap_unlock_write(mapping);
}
EXPORT_SYMBOL(unmap_mapping_range);
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
- expand_downwards(vma, address - PAGE_SIZE);
+ return expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
- expand_upwards(vma, address + PAGE_SIZE);
+ return expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 7b36aa7cc89a..7f684d5a8087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -778,10 +778,12 @@ again: remove_next = 1 + (end > next->vm_end);
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;
+ importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
- if (error)
+ if (error) {
+ importer->anon_vma = NULL;
return error;
- importer->anon_vma = exporter->anon_vma;
+ }
}
}
@@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start;
+ unsigned long new_start, actual_size;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ actual_size = size;
+ if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+ actual_size -= PAGE_SIZE;
+ if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
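A simplified sketch of the adjusted stack-limit test above: one guard page is subtracted from the mapping size before comparing against RLIMIT_STACK, so the guard page no longer counts toward the limit. The flag values and sizes are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define VM_GROWSUP	0x0100UL	/* illustrative flag bits */
#define VM_GROWSDOWN	0x0200UL

static bool stack_limit_ok(unsigned long size, unsigned long vm_flags,
			   unsigned long rlim_cur)
{
	unsigned long actual_size = size;

	/* Do not charge the guard page of a growable mapping to the limit. */
	if (size && (vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
		actual_size -= PAGE_SIZE;
	return actual_size <= rlim_cur;
}

int main(void)
{
	/* A stack that exceeds an 8 MiB limit only by its guard page passes. */
	printf("%d\n", stack_limit_ok((8UL << 20) + PAGE_SIZE, VM_GROWSDOWN,
				      8UL << 20));
	return 0;
}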
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d5d81f5384d1..6f4335238e33 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1541,16 +1541,6 @@ pause:
bdi_start_background_writeback(bdi);
}
-void set_page_dirty_balance(struct page *page)
-{
- if (set_page_dirty(page)) {
- struct address_space *mapping = page_mapping(page);
-
- if (mapping)
- balance_dirty_pages_ratelimited(mapping);
- }
-}
-
static DEFINE_PER_CPU(int, bdp_ratelimits);
/*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
* page dirty in that case, but not all the buffers. This is a "bottom-up"
* dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
*
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation. Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
*/
int __set_page_dirty_nobuffers(struct page *page)
{
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
- struct address_space *mapping2;
unsigned long flags;
if (!mapping)
return 1;
spin_lock_irqsave(&mapping->tree_lock, flags);
- mapping2 = page_mapping(page);
- if (mapping2) { /* Race with truncate? */
- BUG_ON(mapping2 != mapping);
- WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- account_page_dirtied(page, mapping);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page), PAGECACHE_TAG_DIRTY);
- }
+ BUG_ON(page_mapping(page) != mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
if (mapping->host) {
/* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
/*
* We carefully synchronise fault handlers against
* installing a dirty pte and marking the page dirty
- * at this point. We do this by having them hold the
- * page lock at some point after installing their
- * pte, but before marking the page dirty.
- * Pages are always locked coming in here, so we get
- * the desired exclusion. See mm/memory.c:do_wp_page()
- * for more comments.
+ * at this point. We do this by having them hold the
+ * page lock while dirtying the page, and pages are
+ * always locked coming in here, so we get the desired
+ * exclusion.
*/
if (TestClearPageDirty(page)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
diff --git a/mm/rmap.c b/mm/rmap.c
index c5bc241127b2..71cd5bd0c17d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
+ anon_vma->degree = 1; /* Reference for first vma */
+ anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
* from fork, the root will be reset to the parents anon_vma.
@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
+ /* vma reference or self-parent link for new root */
+ anon_vma->degree++;
allocated = NULL;
avc = NULL;
}
@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
/*
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
+ *
+ * If dst->anon_vma is NULL this function tries to find and reuse existing
+ * anon_vma which has no vmas and only one child anon_vma. This prevents
+ * degradation of anon_vma hierarchy to endless linear chain in case of
+ * constantly forking task. On the other hand, an anon_vma with more than one
+ * child isn't reused even if there is no live vma, thus the rmap walker has a
+ * good chance of avoiding scanning the whole hierarchy when it searches for
+ * where a page is mapped.
*/
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
@@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
anon_vma = pavc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_chain_link(dst, avc, anon_vma);
+
+ /*
+ * Reuse existing anon_vma if its degree is lower than two,
+ * which means it has no vma and only one anon_vma child.
+ *
+ * Do not choose parent anon_vma, otherwise first child
+ * will always reuse it. Root anon_vma is never reused:
+ * it has self-parent reference and at least one child.
+ */
+ if (!dst->anon_vma && anon_vma != src->anon_vma &&
+ anon_vma->degree < 2)
+ dst->anon_vma = anon_vma;
}
+ if (dst->anon_vma)
+ dst->anon_vma->degree++;
unlock_anon_vma_root(root);
return 0;
@@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
if (!pvma->anon_vma)
return 0;
+ /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
+ vma->anon_vma = NULL;
+
/*
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
@@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
if (error)
return error;
+ /* An existing anon_vma has been reused, all done then. */
+ if (vma->anon_vma)
+ return 0;
+
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
if (!anon_vma)
@@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
* lock any of the anon_vmas in this anon_vma tree.
*/
anon_vma->root = pvma->anon_vma->root;
+ anon_vma->parent = pvma->anon_vma;
/*
* With refcounts, an anon_vma can stay around longer than the
* process it belongs to. The root anon_vma needs to be pinned until
@@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
+ anon_vma->parent->degree++;
anon_vma_unlock_write(anon_vma);
return 0;
@@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* Leave empty anon_vmas on the list - we'll need
* to free them outside the lock.
*/
- if (RB_EMPTY_ROOT(&anon_vma->rb_root))
+ if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+ anon_vma->parent->degree--;
continue;
+ }
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
+ if (vma->anon_vma)
+ vma->anon_vma->degree--;
unlock_anon_vma_root(root);
/*
@@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
+ BUG_ON(anon_vma->degree);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd9a72bc4a1b..ab2505c3ef54 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
return false;
/*
- * There is a potential race between when kswapd checks its watermarks
- * and a process gets throttled. There is also a potential race if
- * processes get throttled, kswapd wakes, a large process exits therby
- * balancing the zones that causes kswapd to miss a wakeup. If kswapd
- * is going to sleep, no process should be sleeping on pfmemalloc_wait
- * so wake them now if necessary. If necessary, processes will wake
- * kswapd and get throttled again
+ * The throttled processes are normally woken up in balance_pgdat() as
+ * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+ * race between when kswapd checks the watermarks and a process gets
+ * throttled. There is also a potential race if processes get
+ * throttled, kswapd wakes, a large process exits thereby balancing the
+ * zones, which causes kswapd to exit balance_pgdat() before reaching
+ * the wake up checks. If kswapd is going to sleep, no process should
+ * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+ * the wake up is premature, processes will wake kswapd and get
+ * throttled again. The difference from wake ups in balance_pgdat() is
+ * that here we are under prepare_to_wait().
*/
- if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
- wake_up(&pgdat->pfmemalloc_wait);
- return false;
- }
+ if (waitqueue_active(&pgdat->pfmemalloc_wait))
+ wake_up_all(&pgdat->pfmemalloc_wait);
return pgdat_balanced(pgdat, order, classzone_idx);
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index fc1835c6bb40..00f9e144cc97 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
kfree(entry);
/* Make room for the rest of the fragments. */
- if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
goto free;
@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
*/
mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
- max_fragment_size = (mtu - header_size - ETH_HLEN);
+ max_fragment_size = mtu - header_size;
max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
/* Don't even try to fragment, if we need more than 16 fragments */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 90cff585b37d..e0bcf9e84273 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -810,7 +810,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
goto out;
gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
- if (!gw_node->bandwidth_down == 0)
+ if (!gw_node)
goto out;
switch (atomic_read(&bat_priv->gw_mode)) {
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index ab6bb2af1d45..b24e4bb64fb5 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
if (orig_initialized)
atomic_dec(&bat_priv->mcast.num_disabled);
orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
- /* If mcast support is being switched off increase the disabled
- * mcast node counter.
+ /* If mcast support is being switched off or if this is an initial
+ * OGM without mcast support then increase the disabled mcast
+ * node counter.
*/
} else if (!orig_mcast_enabled &&
- orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+ (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+ !orig_initialized)) {
atomic_inc(&bat_priv->mcast.num_disabled);
orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
}
@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
struct batadv_priv *bat_priv = orig->bat_priv;
- if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+ if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+ orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
atomic_dec(&bat_priv->mcast.num_disabled);
batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 8d04d174669e..fab47f1f3ef9 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
if (!bat_priv->nc.decoding_hash)
goto err;
- batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+ batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
&batadv_nc_decoding_hash_lock_class_key);
INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6a484514cd3e..bea8198d0198 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
batadv_frag_purge_orig(orig_node, NULL);
- batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
- "originator timed out");
-
if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
atomic_set(&orig_node->last_ttvn, 0);
orig_node->tt_buff = NULL;
orig_node->tt_buff_len = 0;
+ orig_node->last_seen = jiffies;
reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
orig_node->bcast_seqno_reset = reset_time;
#ifdef CONFIG_BATMAN_ADV_MCAST
@@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
if (batadv_purge_orig_node(bat_priv, orig_node)) {
batadv_gw_node_delete(bat_priv, orig_node);
hlist_del_rcu(&orig_node->hash_entry);
+ batadv_tt_global_del_orig(orig_node->bat_priv,
+ orig_node, -1,
+ "originator timed out");
batadv_orig_node_free_ref(orig_node);
continue;
}
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 35f76f2f7824..6648f321864d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
router = batadv_orig_router_get(orig_node, recv_if);
+ if (!router)
+ return router;
+
/* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
* and if activated.
*/
- if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) ||
- !router)
+ if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
return router;
/* bonding: loop through the list of possible routers found
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 76617be1e797..c989253737f0 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -390,7 +390,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
drop:
dev->stats.rx_dropped++;
- kfree_skb(skb);
return NET_RX_DROP;
}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 85bcc21e84d2..ce82722d049b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
BT_DBG("");
+ if (!l2cap_is_socket(sock))
+ return -EBADFD;
+
baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 67fe5e84e68f..278a194e6af4 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
BT_DBG("");
+ if (!l2cap_is_socket(sock))
+ return -EBADFD;
+
session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
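
The two Bluetooth hunks above (bnep and cmtp) reject sockets that are not L2CAP sockets before dereferencing l2cap_pi(sock->sk); without the check, any socket type handed in through the ioctl would be treated as L2CAP state. A minimal userspace sketch of the same guard, with purely illustrative types (the kernel returns -EBADFD; plain EBADF is used here for portability):

/* Verify the object really is the expected type before using its
 * type-specific representation.  Types and names are illustrative only.
 */
#include <errno.h>
#include <stdio.h>

enum sock_kind { SOCK_KIND_L2CAP, SOCK_KIND_OTHER };

struct generic_sock {
    enum sock_kind kind;
    int channel;            /* only meaningful for SOCK_KIND_L2CAP */
};

static int add_connection(struct generic_sock *sock)
{
    if (sock->kind != SOCK_KIND_L2CAP)
        return -EBADF;      /* the kernel code returns -EBADFD here */

    /* safe to use L2CAP-specific state now */
    printf("using L2CAP channel %d\n", sock->channel);
    return 0;
}

int main(void)
{
    struct generic_sock good = { SOCK_KIND_L2CAP, 42 };
    struct generic_sock bad  = { SOCK_KIND_OTHER, 0 };

    printf("good: %d\n", add_connection(&good));
    printf("bad:  %d\n", add_connection(&bad));
    return 0;
}
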
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 39a5c8a01726..3f2e8b830cbd 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -242,7 +242,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags))
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
@@ -509,7 +510,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags)) {
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
hdev->lmp_ver = rp->lmp_ver;
@@ -528,7 +530,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags))
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
@@ -2194,7 +2197,12 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+ /* Require HCI_CONNECTABLE or a whitelist entry to accept the
+ * connection. These features are only touched through mgmt so
+ * only do the checks if HCI_MGMT is set.
+ */
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index cc25d0b74b36..07348e142f16 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1314,13 +1314,14 @@ int hidp_connection_add(struct hidp_connadd_req *req,
{
struct hidp_session *session;
struct l2cap_conn *conn;
- struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+ struct l2cap_chan *chan;
int ret;
ret = hidp_verify_sockets(ctrl_sock, intr_sock);
if (ret)
return ret;
+ chan = l2cap_pi(ctrl_sock->sk)->chan;
conn = NULL;
l2cap_chan_lock(chan);
if (chan->conn)
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 15845814a0f2..ba6eb17226da 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au,
int ret;
char tmp_enc[40];
__le32 tmp[5] = {
- 16u, msg->hdr.crc, msg->footer.front_crc,
+ cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
msg->footer.middle_crc, msg->footer.data_crc,
};
ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
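
The calcu_signature() change above stores the length as cpu_to_le32(16) instead of a host-order 16u, so the bytes fed into the signature are identical on little- and big-endian hosts. A hedged userspace sketch of the idea follows; cpu_to_le32_sketch is an illustrative stand-in, not the kernel macro:

/* Values that are hashed or signed as raw bytes must have a fixed byte
 * order, independent of the host CPU.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t cpu_to_le32_sketch(uint32_t v)
{
    uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
                     (v >> 16) & 0xff, (v >> 24) & 0xff };
    uint32_t out;

    memcpy(&out, b, sizeof(out));   /* bytes are now little-endian in memory */
    return out;
}

int main(void)
{
    uint32_t wire = cpu_to_le32_sketch(16);
    uint8_t bytes[4];

    memcpy(bytes, &wire, sizeof(bytes));
    /* prints 10 00 00 00 on every host; a bare 16u would lay out as
     * 00 00 00 10 on a big-endian machine */
    printf("%02x %02x %02x %02x\n", bytes[0], bytes[1], bytes[2], bytes[3]);
    return 0;
}
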
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a83062ceeec9..f2148e22b148 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len,
if (src_len != sizeof(u32) + dst_len)
return -EINVAL;
- buf_len = le32_to_cpu(*(u32 *)src);
+ buf_len = le32_to_cpu(*(__le32 *)src);
if (buf_len != dst_len)
return -EINVAL;
diff --git a/net/core/dev.c b/net/core/dev.c
index f411c28d0a66..683d493aa1bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1694,6 +1694,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb_scrub_packet(skb, true);
skb->protocol = eth_type_trans(skb, dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
return 0;
}
@@ -2522,7 +2523,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
/* If MPLS offload request, verify we are testing hardware MPLS features
* instead of standard features for the netdev.
*/
-#ifdef CONFIG_NET_MPLS_GSO
+#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
@@ -2562,7 +2563,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
- const struct net_device *dev = skb->dev;
+ struct net_device *dev = skb->dev;
netdev_features_t features = dev->features;
u16 gso_segs = skb_shinfo(skb)->gso_segs;
__be16 protocol = skb->protocol;
@@ -2570,11 +2571,21 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
features &= ~NETIF_F_GSO_MASK;
- if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- protocol = veh->h_vlan_encapsulated_proto;
- } else if (!vlan_tx_tag_present(skb)) {
- return harmonize_features(skb, features);
+ /* If encapsulation offload request, verify we are testing
+ * hardware encapsulation features instead of standard
+ * features for the netdev
+ */
+ if (skb->encapsulation)
+ features &= dev->hw_enc_features;
+
+ if (!vlan_tx_tag_present(skb)) {
+ if (unlikely(protocol == htons(ETH_P_8021Q) ||
+ protocol == htons(ETH_P_8021AD))) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ } else {
+ goto finalize;
+ }
}
features = netdev_intersect_features(features,
@@ -2591,6 +2602,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
+finalize:
+ if (dev->netdev_ops->ndo_features_check)
+ features &= dev->netdev_ops->ndo_features_check(skb, dev,
+ features);
+
return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
@@ -2661,19 +2677,12 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
if (unlikely(!skb))
goto out_null;
- /* If encapsulation offload request, verify we are testing
- * hardware encapsulation features instead of standard
- * features for the netdev
- */
- if (skb->encapsulation)
- features &= dev->hw_enc_features;
-
if (netif_needs_gso(dev, skb, features)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs)) {
- segs = NULL;
+ goto out_kfree_skb;
} else if (segs) {
consume_skb(skb);
skb = segs;
@@ -4557,6 +4566,68 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+ void *have;
+ int work, weight;
+
+ list_del_init(&n->poll_list);
+
+ have = netpoll_poll_lock(n);
+
+ weight = n->weight;
+
+ /* This NAPI_STATE_SCHED test is for avoiding a race
+ * with netpoll's poll_napi(). Only the entity which
+ * obtains the lock and sees NAPI_STATE_SCHED set will
+ * actually make the ->poll() call. Therefore we avoid
+ * accidentally calling ->poll() when NAPI is not scheduled.
+ */
+ work = 0;
+ if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+ work = n->poll(n, weight);
+ trace_napi_poll(n);
+ }
+
+ WARN_ON_ONCE(work > weight);
+
+ if (likely(work < weight))
+ goto out_unlock;
+
+ /* Drivers must not modify the NAPI state if they
+ * consume the entire weight. In such cases this code
+ * still "owns" the NAPI instance and therefore can
+ * move the instance around on the list at-will.
+ */
+ if (unlikely(napi_disable_pending(n))) {
+ napi_complete(n);
+ goto out_unlock;
+ }
+
+ if (n->gro_list) {
+ /* flush too old packets
+ * If HZ < 1000, flush all packets.
+ */
+ napi_gro_flush(n, HZ >= 1000);
+ }
+
+ /* Some drivers may have called napi_schedule
+ * prior to exhausting their budget.
+ */
+ if (unlikely(!list_empty(&n->poll_list))) {
+ pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
+ n->dev ? n->dev->name : "backlog");
+ goto out_unlock;
+ }
+
+ list_add_tail(&n->poll_list, repoll);
+
+out_unlock:
+ netpoll_poll_unlock(have);
+
+ return work;
+}
+
static void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4564,74 +4635,34 @@ static void net_rx_action(struct softirq_action *h)
int budget = netdev_budget;
LIST_HEAD(list);
LIST_HEAD(repoll);
- void *have;
local_irq_disable();
list_splice_init(&sd->poll_list, &list);
local_irq_enable();
- while (!list_empty(&list)) {
+ for (;;) {
struct napi_struct *n;
- int work, weight;
-
- /* If softirq window is exhausted then punt.
- * Allow this to run for 2 jiffies since which will allow
- * an average latency of 1.5/HZ.
- */
- if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
- goto softnet_break;
-
-
- n = list_first_entry(&list, struct napi_struct, poll_list);
- list_del_init(&n->poll_list);
- have = netpoll_poll_lock(n);
-
- weight = n->weight;
-
- /* This NAPI_STATE_SCHED test is for avoiding a race
- * with netpoll's poll_napi(). Only the entity which
- * obtains the lock and sees NAPI_STATE_SCHED set will
- * actually make the ->poll() call. Therefore we avoid
- * accidentally calling ->poll() when NAPI is not scheduled.
- */
- work = 0;
- if (test_bit(NAPI_STATE_SCHED, &n->state)) {
- work = n->poll(n, weight);
- trace_napi_poll(n);
+ if (list_empty(&list)) {
+ if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
+ return;
+ break;
}
- WARN_ON_ONCE(work > weight);
-
- budget -= work;
+ n = list_first_entry(&list, struct napi_struct, poll_list);
+ budget -= napi_poll(n, &repoll);
- /* Drivers must not modify the NAPI state if they
- * consume the entire weight. In such cases this code
- * still "owns" the NAPI instance and therefore can
- * move the instance around on the list at-will.
+ /* If softirq window is exhausted then punt.
+ * Allow this to run for up to 2 jiffies, which allows
+ * an average latency of 1.5/HZ.
*/
- if (unlikely(work == weight)) {
- if (unlikely(napi_disable_pending(n))) {
- napi_complete(n);
- } else {
- if (n->gro_list) {
- /* flush too old packets
- * If HZ < 1000, flush all packets.
- */
- napi_gro_flush(n, HZ >= 1000);
- }
- list_add_tail(&n->poll_list, &repoll);
- }
+ if (unlikely(budget <= 0 ||
+ time_after_eq(jiffies, time_limit))) {
+ sd->time_squeeze++;
+ break;
}
-
- netpoll_poll_unlock(have);
}
- if (!sd_has_rps_ipi_waiting(sd) &&
- list_empty(&list) &&
- list_empty(&repoll))
- return;
-out:
local_irq_disable();
list_splice_tail_init(&sd->poll_list, &list);
@@ -4641,12 +4672,6 @@ out:
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
net_rps_action_and_irq_enable(sd);
-
- return;
-
-softnet_break:
- sd->time_squeeze++;
- goto out;
}
struct netdev_adjacent {
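
The net/core/dev.c rework above factors the per-instance work into napi_poll() and turns net_rx_action() into a loop that charges each poll against a shared budget and requeues instances that consumed their full weight. A very reduced, single-threaded userspace sketch of that control flow (no locking or softirq machinery; names are illustrative):

#include <stdio.h>

#define NPOLL 3

struct poller {
    const char *name;
    int pending;          /* packets still queued on this source */
};

static int poll_one(struct poller *p, int weight)
{
    int done = p->pending < weight ? p->pending : weight;

    p->pending -= done;
    return done;
}

int main(void)
{
    struct poller pollers[NPOLL] = {
        { "eth0", 5 }, { "eth1", 130 }, { "eth2", 12 }
    };
    int budget = 300, weight = 64;
    int i;

    while (budget > 0) {
        int repolled = 0;

        for (i = 0; i < NPOLL && budget > 0; i++) {
            int work;

            if (!pollers[i].pending)
                continue;
            work = poll_one(&pollers[i], weight);
            budget -= work;
            printf("%s: did %d, %d left, budget %d\n",
                   pollers[i].name, work, pollers[i].pending, budget);
            if (work == weight)
                repolled++;   /* full weight used: would go on the repoll list */
        }
        if (!repolled)
            break;            /* nothing asked to be polled again */
    }
    return 0;
}
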
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ae13ef6b3ea7..395c15b82087 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4148,6 +4148,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->ignore_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
+ skb_init_secmark(skb);
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 95e47c97585e..394a200f93c1 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -122,14 +122,18 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
int err;
skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
+ }
skb = vlan_hwaccel_push_inside(skb);
if (unlikely(!skb))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7f18262e2326..65caf8b95e17 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
break;
- if (tso_segs == 1) {
+ if (tso_segs == 1 || !max_segs) {
if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
(tcp_skb_is_last(sk, skb) ?
nonagle : TCP_NAGLE_PUSH))))
@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
limit = mss_now;
- if (tso_segs > 1 && !tcp_urg_mode(tp))
+ if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
min_t(unsigned int,
cwnd_quota,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ff87805258e..9c0b54e87b47 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1387,6 +1387,28 @@ ipv6_pktoptions:
return 0;
}
+static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ const struct tcphdr *th)
+{
+ /* This is tricky: we move IP6CB at its correct location into
+ * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
+ * _decode_session6() uses IP6CB().
+ * barrier() makes sure compiler won't play aliasing games.
+ */
+ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
+ sizeof(struct inet6_skb_parm));
+ barrier();
+
+ TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+ skb->len - th->doff*4);
+ TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+ TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+ TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+ TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
+ TCP_SKB_CB(skb)->sacked = 0;
+}
+
static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
@@ -1418,24 +1440,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
th = tcp_hdr(skb);
hdr = ipv6_hdr(skb);
- /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
- * barrier() makes sure compiler wont play fool^Waliasing games.
- */
- memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
- sizeof(struct inet6_skb_parm));
- barrier();
-
- TCP_SKB_CB(skb)->seq = ntohl(th->seq);
- TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
- skb->len - th->doff*4);
- TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
- TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
- TCP_SKB_CB(skb)->tcp_tw_isn = 0;
- TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
- TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
- tcp_v6_iif(skb));
+ inet6_iif(skb));
if (!sk)
goto no_tcp_socket;
@@ -1451,6 +1458,8 @@ process:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
+ tcp_v6_fill_cb(skb, hdr, th);
+
#ifdef CONFIG_TCP_MD5SIG
if (tcp_v6_inbound_md5_hash(sk, skb))
goto discard_and_relse;
@@ -1482,6 +1491,8 @@ no_tcp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
+ tcp_v6_fill_cb(skb, hdr, th);
+
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
@@ -1505,6 +1516,8 @@ do_time_wait:
goto discard_it;
}
+ tcp_v6_fill_cb(skb, hdr, th);
+
if (skb->len < (th->doff<<2)) {
inet_twsk_put(inet_twsk(sk));
goto bad_packet;
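
The tcp_v6_fill_cb() helper introduced above moves the IPv6 control-block data into its slot inside the TCP view of the same scratch area with memmove() (the regions overlap) and a compiler barrier so the move cannot be reordered against later accesses, and it is now called only after xfrm6_policy_check(). A simplified userspace sketch of that pattern; the layout and names are illustrative, not the real sk_buff cb:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct ip_meta  { int iface; };
struct tcp_meta { struct ip_meta header; unsigned int seq; };

struct packet {
    _Alignas(struct tcp_meta) unsigned char cb[48];  /* shared scratch, like skb->cb[] */
};

#define IPCB(p)  ((struct ip_meta *)(p)->cb)
#define TCPCB(p) ((struct tcp_meta *)(p)->cb)

static void fill_tcp_cb(struct packet *p, unsigned int seq)
{
    /* the IP view and the TCP view share the same bytes, so the source and
     * destination overlap: memmove(), never memcpy() */
    memmove(&TCPCB(p)->header, IPCB(p), sizeof(struct ip_meta));
    atomic_signal_fence(memory_order_seq_cst);   /* stands in for barrier() */

    TCPCB(p)->seq = seq;
}

int main(void)
{
    struct packet p;

    memset(&p, 0, sizeof(p));
    IPCB(&p)->iface = 7;          /* written by the "IP layer" */

    fill_tcp_cb(&p, 1000);
    printf("iface=%d seq=%u\n", TCPCB(&p)->header.iface, TCPCB(&p)->seq);
    return 0;
}
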
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0bb7038121ac..bd4e46ec32bd 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
sdata->crypto_tx_tailroom_needed_cnt--;
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta = key->sta;
sdata = key->sdata;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(sdata);
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(key->sdata);
}
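
The key.c hunks above only drop the tailroom reservation when none of the flags that make software crypto need extra skb space is set. Checking a combined mask is equivalent to the three separate tests in the patch; a small sketch with illustrative flag values:

#include <stdio.h>

#define KEY_FLAG_GENERATE_MMIC  (1u << 0)
#define KEY_FLAG_GENERATE_IV    (1u << 1)
#define KEY_FLAG_PUT_IV_SPACE   (1u << 2)

#define KEY_FLAGS_NEED_TAILROOM \
    (KEY_FLAG_GENERATE_MMIC | KEY_FLAG_GENERATE_IV | KEY_FLAG_PUT_IV_SPACE)

static int tailroom_needed_cnt = 3;

static void key_enabled_hw(unsigned int flags)
{
    if (!(flags & KEY_FLAGS_NEED_TAILROOM))
        tailroom_needed_cnt--;        /* hardware handles everything */
}

int main(void)
{
    key_enabled_hw(0);                          /* decrements */
    key_enabled_hw(KEY_FLAG_GENERATE_IV);       /* keeps the reservation */
    printf("tailroom_needed_cnt = %d\n", tailroom_needed_cnt);
    return 0;
}
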
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index ca27837974fe..349295d21946 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -31,10 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
SKB_GSO_TCPV6 |
SKB_GSO_UDP |
SKB_GSO_DODGY |
- SKB_GSO_TCP_ECN |
- SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
- SKB_GSO_IPIP)))
+ SKB_GSO_TCP_ECN)))
goto out;
/* Setup inner SKB. */
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 13c2e17bbe27..cde4a6702fa3 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -463,7 +463,7 @@ static void nfnetlink_rcv(struct sk_buff *skb)
}
#ifdef CONFIG_MODULES
-static int nfnetlink_bind(int group)
+static int nfnetlink_bind(struct net *net, int group)
{
const struct nfnetlink_subsystem *ss;
int type;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 074cf3e91c6f..84ea76ca3f1f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1091,8 +1091,10 @@ static void netlink_remove(struct sock *sk)
mutex_unlock(&nl_sk_hash_lock);
netlink_table_grab();
- if (nlk_sk(sk)->subscriptions)
+ if (nlk_sk(sk)->subscriptions) {
__sk_del_bind_node(sk);
+ netlink_update_listeners(sk);
+ }
netlink_table_ungrab();
}
@@ -1139,8 +1141,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
int err = 0;
sock->state = SS_UNCONNECTED;
@@ -1226,8 +1228,8 @@ static int netlink_release(struct socket *sock)
module_put(nlk->module);
- netlink_table_grab();
if (netlink_is_kernel(sk)) {
+ netlink_table_grab();
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
struct listeners *old;
@@ -1241,11 +1243,16 @@ static int netlink_release(struct socket *sock)
nl_table[sk->sk_protocol].flags = 0;
nl_table[sk->sk_protocol].registered = 0;
}
- } else if (nlk->subscriptions) {
- netlink_update_listeners(sk);
+ netlink_table_ungrab();
}
- netlink_table_ungrab();
+ if (nlk->netlink_unbind) {
+ int i;
+
+ for (i = 0; i < nlk->ngroups; i++)
+ if (test_bit(i, nlk->groups))
+ nlk->netlink_unbind(sock_net(sk), i + 1);
+ }
kfree(nlk->groups);
nlk->groups = NULL;
@@ -1410,9 +1417,10 @@ static int netlink_realloc_groups(struct sock *sk)
return err;
}
-static void netlink_unbind(int group, long unsigned int groups,
- struct netlink_sock *nlk)
+static void netlink_undo_bind(int group, long unsigned int groups,
+ struct sock *sk)
{
+ struct netlink_sock *nlk = nlk_sk(sk);
int undo;
if (!nlk->netlink_unbind)
@@ -1420,7 +1428,7 @@ static void netlink_unbind(int group, long unsigned int groups,
for (undo = 0; undo < group; undo++)
if (test_bit(undo, &groups))
- nlk->netlink_unbind(undo);
+ nlk->netlink_unbind(sock_net(sk), undo);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1458,10 +1466,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
for (group = 0; group < nlk->ngroups; group++) {
if (!test_bit(group, &groups))
continue;
- err = nlk->netlink_bind(group);
+ err = nlk->netlink_bind(net, group);
if (!err)
continue;
- netlink_unbind(group, groups, nlk);
+ netlink_undo_bind(group, groups, sk);
return err;
}
}
@@ -1471,7 +1479,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
netlink_insert(sk, net, nladdr->nl_pid) :
netlink_autobind(sock);
if (err) {
- netlink_unbind(nlk->ngroups, groups, nlk);
+ netlink_undo_bind(nlk->ngroups, groups, sk);
return err;
}
}
@@ -2122,7 +2130,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
if (!val || val - 1 >= nlk->ngroups)
return -EINVAL;
if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
- err = nlk->netlink_bind(val);
+ err = nlk->netlink_bind(sock_net(sk), val);
if (err)
return err;
}
@@ -2131,7 +2139,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
optname == NETLINK_ADD_MEMBERSHIP);
netlink_table_ungrab();
if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
- nlk->netlink_unbind(val);
+ nlk->netlink_unbind(sock_net(sk), val);
err = 0;
break;
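
Besides threading the network namespace through the bind/unbind callbacks, the af_netlink.c changes above keep the netlink_undo_bind() pattern: when binding a set of multicast groups fails part-way through, only the groups that were already bound are unwound. A compact sketch of that unwind-on-partial-failure idea (group numbers and the failing group are made up):

#include <stdio.h>

#define NGROUPS 8

static int do_bind(int group)
{
    return group == 5 ? -1 : 0;     /* pretend group 5 cannot be bound */
}

static void do_unbind(int group)
{
    printf("unbind group %d\n", group);
}

static int bind_groups(unsigned long groups)
{
    int group, undo;

    for (group = 0; group < NGROUPS; group++) {
        if (!(groups & (1ul << group)))
            continue;
        if (do_bind(group) == 0)
            continue;
        /* roll back everything bound before the failing group */
        for (undo = 0; undo < group; undo++)
            if (groups & (1ul << undo))
                do_unbind(undo);
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("result = %d\n",
           bind_groups((1ul << 2) | (1ul << 5) | (1ul << 6)));
    return 0;
}
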
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index b20a1731759b..f123a88496f8 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -39,8 +39,8 @@ struct netlink_sock {
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
- int (*netlink_bind)(int group);
- void (*netlink_unbind)(int group);
+ int (*netlink_bind)(struct net *net, int group);
+ void (*netlink_unbind)(struct net *net, int group);
struct module *module;
#ifdef CONFIG_NETLINK_MMAP
struct mutex pg_vec_lock;
@@ -65,8 +65,8 @@ struct netlink_table {
unsigned int groups;
struct mutex *cb_mutex;
struct module *module;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sock);
int registered;
};
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 76393f2f4b22..2e11061ef885 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -983,11 +983,67 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
{ .name = "notify", },
};
+static int genl_bind(struct net *net, int group)
+{
+ int i, err = 0;
+
+ down_read(&cb_lock);
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+ struct genl_family *f;
+
+ list_for_each_entry(f, genl_family_chain(i), family_list) {
+ if (group >= f->mcgrp_offset &&
+ group < f->mcgrp_offset + f->n_mcgrps) {
+ int fam_grp = group - f->mcgrp_offset;
+
+ if (!f->netnsok && net != &init_net)
+ err = -ENOENT;
+ else if (f->mcast_bind)
+ err = f->mcast_bind(net, fam_grp);
+ else
+ err = 0;
+ break;
+ }
+ }
+ }
+ up_read(&cb_lock);
+
+ return err;
+}
+
+static void genl_unbind(struct net *net, int group)
+{
+ int i;
+ bool found = false;
+
+ down_read(&cb_lock);
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+ struct genl_family *f;
+
+ list_for_each_entry(f, genl_family_chain(i), family_list) {
+ if (group >= f->mcgrp_offset &&
+ group < f->mcgrp_offset + f->n_mcgrps) {
+ int fam_grp = group - f->mcgrp_offset;
+
+ if (f->mcast_unbind)
+ f->mcast_unbind(net, fam_grp);
+ found = true;
+ break;
+ }
+ }
+ }
+ up_read(&cb_lock);
+
+ WARN_ON(!found);
+}
+
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
+ .bind = genl_bind,
+ .unbind = genl_unbind,
};
/* we'll bump the group number right afterwards */
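
The new genl_bind()/genl_unbind() handlers above map a global multicast group number back to the owning family by checking which family's [mcgrp_offset, mcgrp_offset + n_mcgrps) range contains it, then work with the family-local index. A sketch of that lookup; the family table here is invented for illustration:

#include <stdio.h>

struct family {
    const char *name;
    int mcgrp_offset;
    int n_mcgrps;
};

static const struct family families[] = {
    { "ctrl",    2, 1 },
    { "nl80211", 3, 4 },
    { "ovs",     7, 3 },
};

static int resolve_group(int group, const struct family **out)
{
    size_t i;

    for (i = 0; i < sizeof(families) / sizeof(families[0]); i++) {
        const struct family *f = &families[i];

        if (group >= f->mcgrp_offset &&
            group < f->mcgrp_offset + f->n_mcgrps) {
            *out = f;
            return group - f->mcgrp_offset;   /* family-local group index */
        }
    }
    return -1;
}

int main(void)
{
    const struct family *f = NULL;
    int local = resolve_group(5, &f);

    if (local >= 0)
        printf("group 5 -> %s, local index %d\n", f->name, local);
    return 0;
}
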
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 764fdc39c63b..770064c83711 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -147,7 +147,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
hdr = eth_hdr(skb);
hdr->h_proto = mpls->mpls_ethertype;
- skb_set_inner_protocol(skb, skb->protocol);
+ if (!skb->inner_protocol)
+ skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = mpls->mpls_ethertype;
invalidate_flow_key(key);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 332b5a031739..4e9a5f035cbc 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -83,8 +83,7 @@ static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
unsigned int group)
{
return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
- genl_has_listeners(family, genl_info_net(info)->genl_sock,
- group);
+ genl_has_listeners(family, genl_info_net(info), group);
}
static void ovs_notify(struct genl_family *family,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 70bef2ab7f2b..da2fae0873a5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
{
struct flow_stats *stats;
int node = numa_node_id();
+ int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
stats = rcu_dereference(flow->stats[node]);
@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
if (likely(new_stats)) {
new_stats->used = jiffies;
new_stats->packet_count = 1;
- new_stats->byte_count = skb->len;
+ new_stats->byte_count = len;
new_stats->tcp_flags = tcp_flags;
spin_lock_init(&new_stats->lock);
@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
stats->used = jiffies;
stats->packet_count++;
- stats->byte_count += skb->len;
+ stats->byte_count += len;
stats->tcp_flags |= tcp_flags;
unlock:
spin_unlock(&stats->lock);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 9645a21d9eaa..d1eecf707613 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1753,7 +1753,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
__be16 eth_type, __be16 vlan_tci, bool log)
{
const struct nlattr *a;
- bool out_tnl_port = false;
int rem, err;
if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1796,8 +1795,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_OUTPUT:
if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
- out_tnl_port = false;
-
break;
case OVS_ACTION_ATTR_HASH: {
@@ -1832,12 +1829,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_PUSH_MPLS: {
const struct ovs_action_push_mpls *mpls = nla_data(a);
- /* Networking stack do not allow simultaneous Tunnel
- * and MPLS GSO.
- */
- if (out_tnl_port)
- return -EINVAL;
-
if (!eth_p_mpls(mpls->mpls_ethertype))
return -EINVAL;
/* Prohibit push MPLS other than to a white list
@@ -1873,11 +1864,9 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa,
- &out_tnl_port, eth_type, log);
+ &skip_copy, eth_type, log);
if (err)
return err;
-
- skip_copy = out_tnl_port;
break;
case OVS_ACTION_ATTR_SAMPLE:
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 347fa2325b22..484864dd0e68 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -219,7 +219,10 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
false);
if (err < 0)
ip_rt_put(rt);
+ return err;
+
error:
+ kfree_skb(skb);
return err;
}
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 6b69df545b1d..d4168c442db5 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -73,7 +73,7 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
if (IS_ERR(skb))
- return NULL;
+ return skb;
tpi.flags = filter_tnl_flags(tun_key->tun_flags);
tpi.proto = htons(ETH_P_TEB);
@@ -144,7 +144,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
- goto error;
+ goto err_free_skb;
}
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
@@ -157,8 +157,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
fl.flowi4_proto = IPPROTO_GRE;
rt = ip_route_output_key(net, &fl);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto err_free_skb;
+ }
tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
@@ -183,8 +185,9 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
/* Push Tunnel header. */
skb = __build_header(skb, tunnel_hlen);
- if (unlikely(!skb)) {
- err = 0;
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ skb = NULL;
goto err_free_rt;
}
@@ -198,7 +201,8 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
-error:
+err_free_skb:
+ kfree_skb(skb);
return err;
}
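
The vport-gre fix above stops collapsing header-build failures to NULL and instead propagates the encoded error from gre_handle_offloads() with IS_ERR()/PTR_ERR(), freeing the skb on every error path. A simplified userspace sketch of that error-in-pointer convention; these helpers are stand-ins for the kernel macros, not the real ones:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)(intptr_t)err; }
static long  PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static int   IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *build_header(int fail)
{
    if (fail)
        return ERR_PTR(-ENOMEM);    /* propagate the reason, not just NULL */
    return malloc(16);
}

int main(void)
{
    void *buf = build_header(1);

    if (IS_ERR(buf)) {
        printf("build_header failed: %ld\n", PTR_ERR(buf));
        return 1;
    }
    free(buf);
    return 0;
}
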
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 38f95a52241b..d7c46b301024 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -187,7 +187,9 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
false);
if (err < 0)
ip_rt_put(rt);
+ return err;
error:
+ kfree_skb(skb);
return err;
}
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 9584526c0778..2034c6d9cb5a 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
u64_stats_update_end(&stats->syncp);
OVS_CB(skb)->input_vport = vport;
@@ -519,10 +519,9 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
u64_stats_update_end(&stats->syncp);
} else if (sent < 0) {
ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
- kfree_skb(skb);
- } else
+ } else {
ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
-
+ }
return sent;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e52a44785681..6880f34a529a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -785,6 +785,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
struct tpacket3_hdr *last_pkt;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+ struct sock *sk = &po->sk;
if (po->stats.stats3.tp_drops)
status |= TP_STATUS_LOSING;
@@ -809,6 +810,8 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
/* Flush the block */
prb_flush_block(pkc1, pbd1, status);
+ sk->sk_data_ready(sk);
+
pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
@@ -2052,12 +2055,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
smp_wmb();
#endif
- if (po->tp_version <= TPACKET_V2)
+ if (po->tp_version <= TPACKET_V2) {
__packet_set_status(po, h.raw, status);
- else
+ sk->sk_data_ready(sk);
+ } else {
prb_clear_blk_fill_status(&po->rx_ring);
-
- sk->sk_data_ready(sk);
+ }
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 1cb61242e55e..4439ac4c1b53 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
struct kvec *head = buf->head;
struct kvec *tail = buf->tail;
int fraglen;
- int new, old;
+ int new;
if (len > buf->len) {
WARN_ON_ONCE(1);
@@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
buf->len -= fraglen;
new = buf->page_base + buf->page_len;
- old = new + fraglen;
- xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
+
+ xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
if (buf->page_len) {
xdr->p = page_address(*xdr->page_ptr);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 22ba971741e5..29c8675f9a11 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,7 +175,7 @@ config CFG80211_INTERNAL_REGDB
Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_WEXT
- bool
+ bool "cfg80211 wireless extensions compatibility"
depends on CFG80211
select WEXT_CORE
help
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 1bca180db8ad..627f8cbbedb8 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -42,19 +42,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \
__clean-files := $(filter-out $(no-clean-files), $(__clean-files))
-# as clean-files is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# clean-files is given relative to the current directory, unless it
+# starts with $(objtree)/ (which means "./", so do not add "./" unless
+# you want to delete a file from the toplevel object directory).
__clean-files := $(wildcard \
- $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
- $(filter /%, $(__clean-files)))
+ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
+ $(filter $(objtree)/%, $(__clean-files)))
-# as clean-dirs is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# same as clean-files
__clean-dirs := $(wildcard \
- $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \
- $(filter /%, $(clean-dirs)))
+ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \
+ $(filter $(objtree)/%, $(clean-dirs)))
# ==========================================================================
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 9609a7f0faea..c7952375ac53 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
- key_user_put(key->user);
-
/* now throw away the key memory */
if (key->type->destroy)
key->type->destroy(key);
+ key_user_put(key->user);
+
kfree(key->description);
#ifdef KEY_DEBUGGING
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index 255dabc6fc33..2a85e4209f0b 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
spin_lock_irq(&efw->lock);
t = (struct snd_efw_transaction *)data;
- length = min_t(size_t, t->length * sizeof(t->length), length);
+ length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
if (efw->push_ptr < efw->pull_ptr)
capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 8276a743e22e..0cfc9c8c4b4e 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1922,10 +1922,18 @@ int azx_mixer_create(struct azx *chip)
EXPORT_SYMBOL_GPL(azx_mixer_create);
+static bool is_input_stream(struct azx *chip, unsigned char index)
+{
+ return (index >= chip->capture_index_offset &&
+ index < chip->capture_index_offset + chip->capture_streams);
+}
+
/* initialize SD streams */
int azx_init_stream(struct azx *chip)
{
int i;
+ int in_stream_tag = 0;
+ int out_stream_tag = 0;
/* initialize each stream (aka device)
* assign the starting bdl address to each stream (device)
@@ -1938,9 +1946,21 @@ int azx_init_stream(struct azx *chip)
azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
azx_dev->sd_int_sta_mask = 1 << i;
- /* stream tag: must be non-zero and unique */
azx_dev->index = i;
- azx_dev->stream_tag = i + 1;
+
+ /* stream tag must be unique throughout
+ * the stream direction group,
+ * valid values 1...15
+ * use separate stream tag if the flag
+ * AZX_DCAPS_SEPARATE_STREAM_TAG is used
+ */
+ if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
+ azx_dev->stream_tag =
+ is_input_stream(chip, i) ?
+ ++in_stream_tag :
+ ++out_stream_tag;
+ else
+ azx_dev->stream_tag = i + 1;
}
return 0;
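
With AZX_DCAPS_SEPARATE_STREAM_TAG set, the azx_init_stream() change above numbers capture and playback streams from separate 1-based counters instead of the global "index + 1" tag. A small sketch of that assignment; the stream counts and the assumption that capture streams come first are illustrative:

#include <stdio.h>

#define CAPTURE_STREAMS   4
#define PLAYBACK_STREAMS  4
#define NUM_STREAMS       (CAPTURE_STREAMS + PLAYBACK_STREAMS)

static int is_input_stream(int index)
{
    return index < CAPTURE_STREAMS;   /* capture streams come first here */
}

int main(void)
{
    int in_tag = 0, out_tag = 0, i;

    for (i = 0; i < NUM_STREAMS; i++) {
        int tag = is_input_stream(i) ? ++in_tag : ++out_tag;

        printf("stream %d: %s, tag %d\n",
               i, is_input_stream(i) ? "capture" : "playback", tag);
    }
    return 0;
}
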
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2bf0b568e3de..d426a0bd6a5f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -299,6 +299,9 @@ enum {
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
AZX_DCAPS_SNOOP_TYPE(SCH))
+#define AZX_DCAPS_INTEL_SKYLAKE \
+ (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG)
+
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -2027,7 +2030,7 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index aa484fdf4338..166e3e84b963 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
+#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
enum {
AZX_SNOOP_TYPE_NONE ,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5f13d2d18079..b422e406a9cb 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
MODULE_ALIAS("snd-hda-codec-id:10de0070");
MODULE_ALIAS("snd-hda-codec-id:10de0071");
+MODULE_ALIAS("snd-hda-codec-id:10de0072");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
MODULE_ALIAS("snd-hda-codec-id:11069f80");
MODULE_ALIAS("snd-hda-codec-id:11069f81");
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4f6413e01c13..605d14003d25 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec)
spec->gpio_mask;
}
if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
- spec->gpio_mask &= spec->gpio_mask;
- if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
spec->gpio_dir &= spec->gpio_mask;
+ if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+ spec->gpio_data &= spec->gpio_mask;
if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
spec->eapd_mask &= spec->gpio_mask;
if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 81fe1464d268..c0fbe1881439 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -784,8 +784,8 @@ static unsigned int bst_tlv[] = {
static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
@@ -795,8 +795,9 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index b93168d4f648..8d18bbda661b 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -209,16 +209,9 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
switch (config->chan_nr) {
case EIGHT_CHANNEL_SUPPORT:
- ch_reg = 3;
- break;
case SIX_CHANNEL_SUPPORT:
- ch_reg = 2;
- break;
case FOUR_CHANNEL_SUPPORT:
- ch_reg = 1;
- break;
case TWO_CHANNEL_SUPPORT:
- ch_reg = 0;
break;
default:
dev_err(dev->dev, "channel not supported\n");
@@ -227,18 +220,22 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
i2s_disable_channels(dev, substream->stream);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution);
- i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
- i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
- } else {
- i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution);
- i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
- i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+ for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ i2s_write_reg(dev->i2s_base, TCR(ch_reg),
+ xfer_resolution);
+ i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
+ i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
+ } else {
+ i2s_write_reg(dev->i2s_base, RCR(ch_reg),
+ xfer_resolution);
+ i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
+ i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+ }
}
i2s_write_reg(dev->i2s_base, CCR, ccr);
@@ -263,6 +260,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
snd_soc_dai_set_dma_data(dai, substream, NULL);
}
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ i2s_write_reg(dev->i2s_base, TXFFR, 1);
+ else
+ i2s_write_reg(dev->i2s_base, RXFFR, 1);
+
+ return 0;
+}
+
static int dw_i2s_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
@@ -294,6 +304,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
.startup = dw_i2s_startup,
.shutdown = dw_i2s_shutdown,
.hw_params = dw_i2s_hw_params,
+ .prepare = dw_i2s_prepare,
.trigger = dw_i2s_trigger,
};
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index e989ecf046c9..f86de1211b96 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -89,7 +89,7 @@ config SND_SOC_INTEL_BROADWELL_MACH
config SND_SOC_INTEL_BYTCR_RT5640_MACH
tristate "ASoC Audio DSP Support for MID BYT Platform"
- depends on X86
+ depends on X86 && I2C
select SND_SOC_RT5640
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
@@ -101,7 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
- depends on X86_INTEL_LPSS
+ depends on X86_INTEL_LPSS && I2C
select SND_SOC_RT5670
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/bytcr_dpcm_rt5640.c
index f5d0fc1ab10c..eef0c56ec32e 100644
--- a/sound/soc/intel/bytcr_dpcm_rt5640.c
+++ b/sound/soc/intel/bytcr_dpcm_rt5640.c
@@ -227,4 +227,4 @@ module_platform_driver(snd_byt_mc_driver);
MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bytrt5640-audio");
+MODULE_ALIAS("platform:bytt100_rt5640");
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index 4a5bde9c686b..ef2e8b5766a1 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -763,8 +763,12 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
/* does block span more than 1 section */
if (ba->offset >= block->offset && ba->offset < block_end) {
+ /* add block */
+ list_move(&block->list, &dsp->used_block_list);
+ list_add(&block->module_list, block_list);
/* align ba to block boundary */
- ba->offset = block->offset;
+ ba->size -= block_end - ba->offset;
+ ba->offset = block_end;
err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 3abc29e8a928..2ac72eb5e75d 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -343,7 +343,7 @@ int sst_acpi_remove(struct platform_device *pdev)
}
static struct sst_machines sst_acpi_bytcr[] = {
- {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin",
+ {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin",
&byt_rvp_platform_data },
{},
};
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 26ec5117b35c..13d8507333b8 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -454,11 +454,11 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
i2s->playback_dma_data.addr = res->start + I2S_TXDR;
i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->playback_dma_data.maxburst = 16;
+ i2s->playback_dma_data.maxburst = 4;
i2s->capture_dma_data.addr = res->start + I2S_RXDR;
i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->capture_dma_data.maxburst = 16;
+ i2s->capture_dma_data.maxburst = 4;
i2s->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, i2s);
diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h
index 89a5d8bc6ee7..93f456f518a9 100644
--- a/sound/soc/rockchip/rockchip_i2s.h
+++ b/sound/soc/rockchip/rockchip_i2s.h
@@ -127,7 +127,7 @@
#define I2S_DMACR_TDE_DISABLE (0 << I2S_DMACR_TDE_SHIFT)
#define I2S_DMACR_TDE_ENABLE (1 << I2S_DMACR_TDE_SHIFT)
#define I2S_DMACR_TDL_SHIFT 0
-#define I2S_DMACR_TDL(x) ((x - 1) << I2S_DMACR_TDL_SHIFT)
+#define I2S_DMACR_TDL(x) ((x) << I2S_DMACR_TDL_SHIFT)
#define I2S_DMACR_TDL_MASK (0x1f << I2S_DMACR_TDL_SHIFT)
/*
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 985052b3fbed..2c62620abca6 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3230,7 +3230,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname)
{
struct device_node *np = card->dev->of_node;
- int num_routes, old_routes;
+ int num_routes;
struct snd_soc_dapm_route *routes;
int i, ret;
@@ -3248,9 +3248,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
- old_routes = card->num_dapm_routes;
- routes = devm_kzalloc(card->dev,
- (old_routes + num_routes) * sizeof(*routes),
+ routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
GFP_KERNEL);
if (!routes) {
dev_err(card->dev,
@@ -3258,11 +3256,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
- memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
-
for (i = 0; i < num_routes; i++) {
ret = of_property_read_string_index(np, propname,
- 2 * i, &routes[old_routes + i].sink);
+ 2 * i, &routes[i].sink);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3270,7 +3266,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
ret = of_property_read_string_index(np, propname,
- (2 * i) + 1, &routes[old_routes + i].source);
+ (2 * i) + 1, &routes[i].source);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3279,7 +3275,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
}
}
- card->num_dapm_routes += num_routes;
+ card->num_dapm_routes = num_routes;
card->dapm_routes = routes;
return 0;
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 272844746135..327f8642ca80 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
return -EINVAL;
}
- if (cdev->n_streams < 2) {
+ if (cdev->n_streams < 1) {
dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
return -EINVAL;
}
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 6f803609e498..0b0112c80f22 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
*
* TODO: Hook into free() and add that check there as well.
*/
- debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex));
+ debug_check_no_locks_freed(mutex, sizeof(*mutex));
__del_lock(__get_lock(mutex));
return ll_pthread_mutex_destroy(mutex);
}
@@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
try_init_preload();
- debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock));
+ debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
__del_lock(__get_lock(rwlock));
return ll_pthread_rwlock_destroy(rwlock);
}
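
The lockdep preload fix above matters because `mutex + sizeof(*mutex)` is pointer arithmetic: it advances by sizeof(*mutex) elements, not bytes, so the old call described a region sizeof(*mutex) times too large. The fixed call passes the byte length directly. A short sketch that makes the scaling visible (the array only exists to keep the demonstration's pointer arithmetic in bounds):

#include <stdio.h>

struct fake_mutex { char opaque[40]; };

int main(void)
{
    struct fake_mutex arr[sizeof(struct fake_mutex) + 1];
    struct fake_mutex *p = arr;

    /* old style: end computed with element-scaled arithmetic (40 * 40 bytes) */
    printf("wrong span: %zu bytes\n",
           (size_t)((char *)(p + sizeof(*p)) - (char *)p));

    /* fixed style: pass the byte length directly (40 bytes) */
    printf("right span: %zu bytes\n", sizeof(*p));
    return 0;
}
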
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e7417fe97a97..747f86103599 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
if (nr_samples > 0) {
total_nr_samples += nr_samples;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1ce425d101a9..1fd96c13f199 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
return __hist_entry__cmp_compute(p_left, p_right, c);
}
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+ struct hist_entry *right __maybe_unused)
+{
+ return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+ if (sort_compute)
+ return 0;
+
+ if (left->stat.period == right->stat.period)
+ return 0;
+ return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
static void insert_hist_entry_by_compute(struct rb_root *root,
struct hist_entry *he,
int c)
@@ -605,7 +641,7 @@ static void hists__process(struct hists *hists)
hists__precompute(hists);
hists__compute_resort(hists);
} else {
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
}
hists__fprintf(hists, true, 0, 0, 0, stdout);
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
fmt->header = hpp__header;
fmt->width = hpp__width;
fmt->entry = hpp__entry_global;
+ fmt->cmp = hist_entry__cmp_nop;
+ fmt->collapse = hist_entry__cmp_nop;
/* TODO more colors */
switch (idx) {
case PERF_HPP_DIFF__BASELINE:
fmt->color = hpp__color_baseline;
+ fmt->sort = hist_entry__cmp_baseline;
break;
case PERF_HPP_DIFF__DELTA:
fmt->color = hpp__color_delta;
+ fmt->sort = hist_entry__cmp_delta;
break;
case PERF_HPP_DIFF__RATIO:
fmt->color = hpp__color_ratio;
+ fmt->sort = hist_entry__cmp_ratio;
break;
case PERF_HPP_DIFF__WEIGHTED_DIFF:
fmt->color = hpp__color_wdiff;
+ fmt->sort = hist_entry__cmp_wdiff;
break;
default:
+ fmt->sort = hist_entry__cmp_nop;
break;
}
init_header(d, dfmt);
perf_hpp__column_register(fmt);
+ perf_hpp__register_sort_field(fmt);
}
static void ui_init(void)
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 011195e38f21..198f3c3aff95 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -19,7 +19,9 @@
int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
int i;
- const struct option list_options[] = {
+ bool raw_dump = false;
+ struct option list_options[] = {
+ OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
OPT_END()
};
const char * const list_usage[] = {
@@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
+ set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
+
argc = parse_options(argc, argv, list_options, list_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
setup_pager();
+ if (raw_dump) {
+ print_events(NULL, true);
+ return 0;
+ }
+
if (argc == 0) {
print_events(NULL, false);
return 0;
@@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
print_hwcache_events(NULL, false);
else if (strcmp(argv[i], "pmu") == 0)
print_pmu_events(NULL, false);
- else if (strcmp(argv[i], "--raw-dump") == 0)
- print_events(NULL, true);
else {
char *sep = strchr(argv[i], ':'), *s;
int sep_idx;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 39367609c707..072ae8ad67fc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep)
ui_progress__finish();
}
+static void report__output_resort(struct report *rep)
+{
+ struct ui_progress prog;
+ struct perf_evsel *pos;
+
+ ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
+
+ evlist__for_each(rep->session->evlist, pos)
+ hists__output_resort(evsel__hists(pos), &prog);
+
+ ui_progress__finish();
+}
+
static int __cmd_report(struct report *rep)
{
int ret;
@@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep)
if (session_done())
return 0;
+ /*
+ * recalculate number of entries after collapsing since it
+ * might be changed during the collapse phase.
+ */
+ rep->nr_entries = 0;
+ evlist__for_each(session->evlist, pos)
+ rep->nr_entries += evsel__hists(pos)->nr_entries;
+
if (rep->nr_entries == 0) {
ui__error("The %s file has no samples!\n", file->path);
return 0;
}
- evlist__for_each(session->evlist, pos)
- hists__output_resort(evsel__hists(pos));
+ report__output_resort(rep);
return report__browse_hists(rep);
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 0aa7747ff139..961cea183a83 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -285,7 +285,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
@@ -554,7 +554,7 @@ static void perf_top__sort_new_samples(void *arg)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
}
static void *display_thread_tui(void *arg)
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 614d5c4978ab..8d110dec393e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
* function since TEST_ASSERT_VAL() returns in case of failure.
*/
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("use callchain: %d, cumulate callchain: %d\n",
@@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
* 30.00% 10.00% perf perf [.] cmd_record
* 20.00% 0.00% bash libc [.] malloc
* 10.00% 10.00% bash [kernel] [k] page_fault
- * 10.00% 10.00% perf [kernel] [k] schedule
- * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
+ * 10.00% 10.00% bash bash [.] xmalloc
* 10.00% 10.00% perf [kernel] [k] page_fault
- * 10.00% 10.00% perf libc [.] free
* 10.00% 10.00% perf libc [.] malloc
- * 10.00% 10.00% bash bash [.] xmalloc
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * 10.00% 10.00% perf libc [.] free
+ * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
@@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
- { 1000, 1000, "perf", "[kernel]", "schedule" },
- { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
- { 1000, 1000, "bash", "bash", "xmalloc" },
+ { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
};
symbol_conf.use_callchain = false;
@@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* malloc
* main
*
- * 10.00% 10.00% perf [kernel] [k] schedule
+ * 10.00% 10.00% bash bash [.] xmalloc
* |
- * --- schedule
- * run_command
+ * --- xmalloc
+ * malloc
+ * xmalloc <--- NOTE: there's a cycle
+ * malloc
+ * xmalloc
* main
*
* 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
@@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* run_command
* main
*
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * |
+ * --- schedule
+ * run_command
+ * main
+ *
* 10.00% 10.00% perf libc [.] free
* |
* --- free
@@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* run_command
* main
*
- * 10.00% 10.00% bash bash [.] xmalloc
- * |
- * --- xmalloc
- * malloc
- * xmalloc <--- NOTE: there's a cycle
- * malloc
- * xmalloc
- * main
- *
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
@@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
- { 1000, 1000, "perf", "[kernel]", "schedule" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
- { 1000, 1000, "bash", "bash", "xmalloc" },
};
struct callchain_result expected_callchain[] = {
{
@@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "bash", "main" }, },
},
{
- 3, { { "[kernel]", "schedule" },
- { "perf", "run_command" },
- { "perf", "main" }, },
+ 6, { { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "bash", "main" }, },
},
{
3, { { "[kernel]", "sys_perf_event_open" },
@@ -638,6 +641,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "perf", "main" }, },
},
{
+ 3, { { "[kernel]", "schedule" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
4, { { "libc", "free" },
{ "perf", "cmd_record" },
{ "perf", "run_command" },
@@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "perf", "run_command" },
{ "perf", "main" }, },
},
- {
- 6, { { "bash", "xmalloc" },
- { "libc", "malloc" },
- { "bash", "xmalloc" },
- { "libc", "malloc" },
- { "bash", "xmalloc" },
- { "bash", "main" }, },
- },
};
symbol_conf.use_callchain = true;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 74f257a81265..59e53db7914c 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -138,7 +138,7 @@ int test__hists_filter(void)
struct hists *hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("Normal histogram\n");
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index a748f2be1222..f5547610da02 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e6bb04b5b09b..788506eef567 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
bool need_percent;
node = rb_first(root);
- need_percent = !!rb_next(node);
+ need_percent = node && rb_next(node);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index dc0d095f318c..482adae3cc44 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
if (ret)
return ret;
+ if (a->thread != b->thread || !symbol_conf.use_callchain)
+ return 0;
+
ret = b->callchain->max_depth - a->callchain->max_depth;
}
return ret;
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 2f612562978c..3c38f25b1695 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -1,5 +1,8 @@
#include <signal.h>
#include <stdbool.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
#include "../../util/cache.h"
#include "../../util/debug.h"
@@ -88,6 +91,25 @@ int ui__getch(int delay_secs)
return SLkp_getkey();
}
+#ifdef HAVE_BACKTRACE_SUPPORT
+static void ui__signal_backtrace(int sig)
+{
+ void *stackdump[32];
+ size_t size;
+
+ ui__exit(false);
+ psignal(sig, "perf");
+
+ printf("-------- backtrace --------\n");
+ size = backtrace(stackdump, ARRAY_SIZE(stackdump));
+ backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
+
+ exit(0);
+}
+#else
+# define ui__signal_backtrace ui__signal
+#endif
+
static void ui__signal(int sig)
{
ui__exit(false);
@@ -122,8 +144,8 @@ int ui__init(void)
ui_browser__init();
tui_progress__init();
- signal(SIGSEGV, ui__signal);
- signal(SIGFPE, ui__signal);
+ signal(SIGSEGV, ui__signal_backtrace);
+ signal(SIGFPE, ui__signal_backtrace);
signal(SIGINT, ui__signal);
signal(SIGQUIT, ui__signal);
signal(SIGTERM, ui__signal);
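The tui/setup.c change above registers a backtrace-dumping handler for SIGSEGV and SIGFPE when HAVE_BACKTRACE_SUPPORT is set. Below is a minimal standalone sketch of the same glibc backtrace(3) pattern, assuming a libc that provides <execinfo.h>; the names are illustrative, not perf code, and the program should be built with -rdynamic to get readable symbol names.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <execinfo.h>

static void fatal_signal_backtrace(int sig)
{
        void *stackdump[32];
        int depth;

        /* Not async-signal-safe, but acceptable in a crash-only handler. */
        psignal(sig, "example");
        fprintf(stderr, "-------- backtrace --------\n");
        depth = backtrace(stackdump, sizeof(stackdump) / sizeof(stackdump[0]));
        backtrace_symbols_fd(stackdump, depth, STDERR_FILENO);
        _exit(1);
}

int main(void)
{
        signal(SIGSEGV, fatal_signal_backtrace);
        signal(SIGFPE, fatal_signal_backtrace);
        raise(SIGSEGV);          /* demonstrate the handler */
        return 0;
}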
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 64b377e591e4..14e7a123d43b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl,
return bf;
}
+
+static void free_callchain_node(struct callchain_node *node)
+{
+ struct callchain_list *list, *tmp;
+ struct callchain_node *child;
+ struct rb_node *n;
+
+ list_for_each_entry_safe(list, tmp, &node->val, list) {
+ list_del(&list->list);
+ free(list);
+ }
+
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = container_of(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+ rb_erase(&child->rb_node_in, &node->rb_root_in);
+
+ free_callchain_node(child);
+ free(child);
+ }
+}
+
+void free_callchain(struct callchain_root *root)
+{
+ if (!symbol_conf.use_callchain)
+ return;
+
+ free_callchain_node(&root->node);
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index dbc08cf5f970..c0ec1acc38e4 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize, bool show_dso);
+void free_callchain(struct callchain_root *root);
+
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 6e88b9e395df..182395546ddc 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -6,6 +6,7 @@
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
+#include "ui/progress.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
size_t callchain_size = 0;
struct hist_entry *he;
- if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+ if (symbol_conf.use_callchain)
callchain_size = sizeof(struct callchain_root);
he = zalloc(sizeof(*he) + callchain_size);
@@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
iter->he = he;
he_cache[iter->curr++] = he;
- callchain_append(he->callchain, &callchain_cursor, sample->period);
+ hist_entry__append_callchain(he, sample);
/*
* We need to re-initialize the cursor since callchain_append()
@@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
iter->he = he;
he_cache[iter->curr++] = he;
- callchain_append(he->callchain, &cursor, sample->period);
+ if (symbol_conf.use_callchain)
+ callchain_append(he->callchain, &cursor, sample->period);
return 0;
}
@@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he)
zfree(&he->mem_info);
zfree(&he->stat_acc);
free_srcline(he->srcline);
+ free_callchain(he->callchain);
free(he);
}
@@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
else
p = &(*p)->rb_right;
}
+ hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
@@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
if (!sort__need_collapse)
return;
+ hists->nr_entries = 0;
+
root = hists__get_rotate_entries_in(hists);
+
next = rb_first(root);
while (next) {
@@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
rb_insert_color(&he->rb_node, entries);
}
-void hists__output_resort(struct hists *hists)
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root *root;
struct rb_node *next;
@@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists)
if (!n->filtered)
hists__calc_col_len(hists, n);
+
+ if (prog)
+ ui_progress__update(prog, 1);
}
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index d0ef9a19a744..46bd50344f85 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
void hist_entry__free(struct hist_entry *);
-void hists__output_resort(struct hists *hists);
+void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 28eb1417cb2a..7f9b8632e433 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
}
if (ntevs == 0) { /* No error but failed to find probe point. */
- pr_warning("Probe point '%s' not found.\n",
+ pr_warning("Probe point '%s' not found in debuginfo.\n",
synthesize_perf_probe_point(&pev->point));
- return -ENOENT;
+ if (need_dwarf)
+ return -ENOENT;
+ return 0;
}
/* Error path : ntevs < 0 */
pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c7918f83b300..b5247d777f0e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
int ret = 0;
#if _ELFUTILS_PREREQ(0, 142)
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+
/* Get the call frame information from this dwarf */
- pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg));
+ elf = dwarf_getelf(dbg->dbg);
+ if (elf == NULL)
+ return -EINVAL;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ return -EINVAL;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+ shdr.sh_type == SHT_PROGBITS) {
+ pf->cfi = dwarf_getcfi_elf(elf);
+ } else {
+ pf->cfi = dwarf_getcfi(dbg->dbg);
+ }
#endif
off = 0;
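The probe-finder.c hunk above stops assuming that dwarf_getcfi_elf() will yield usable call frame information: it only takes the .eh_frame route when that section exists and is SHT_PROGBITS, and otherwise falls back to dwarf_getcfi(), which reads .debug_frame from the DWARF data. elf_section_by_name() is a perf-internal helper; the following is a rough sketch of the same check using plain libelf/libdw (elfutils headers assumed, link with -ldw -lelf; the wrapper function name is hypothetical).

#include <string.h>
#include <gelf.h>
#include <elfutils/libdw.h>

static Dwarf_CFI *get_call_frame_info(Dwarf *dbg)
{
        Elf *elf = dwarf_getelf(dbg);
        GElf_Ehdr ehdr;
        Elf_Scn *scn = NULL;

        if (elf == NULL || gelf_getehdr(elf, &ehdr) == NULL)
                return NULL;

        /* Walk the section headers looking for a real .eh_frame. */
        while ((scn = elf_nextscn(elf, scn)) != NULL) {
                GElf_Shdr shdr;
                const char *name;

                if (gelf_getshdr(scn, &shdr) == NULL)
                        continue;
                name = elf_strptr(elf, ehdr.e_shstrndx, shdr.sh_name);
                if (name && strcmp(name, ".eh_frame") == 0 &&
                    shdr.sh_type == SHT_PROGBITS)
                        return dwarf_getcfi_elf(elf);   /* CFI from .eh_frame */
        }

        return dwarf_getcfi(dbg);                       /* CFI from .debug_frame */
}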
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c
index 7cdcf88659c7..9ea914378985 100644
--- a/tools/power/cpupower/utils/cpupower.c
+++ b/tools/power/cpupower/utils/cpupower.c
@@ -199,7 +199,7 @@ int main(int argc, const char *argv[])
}
get_cpu_info(0, &cpupower_cpu_info);
- run_as_root = !getuid();
+ run_as_root = !geteuid();
if (run_as_root) {
ret = uname(&uts);
if (!ret && !strcmp(uts.machine, "x86_64") &&
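The cpupower change above switches the root check from the real to the effective user id: geteuid() is what the kernel actually consults for privileged operations, and it can differ from getuid() for set-uid binaries or after privilege changes. A tiny illustration of the difference, not cpupower code:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* For a set-uid-root binary run by an ordinary user, the real
         * uid stays non-zero while the effective uid becomes 0. */
        printf("real uid: %u, effective uid: %u\n",
               (unsigned int)getuid(), (unsigned int)geteuid());
        printf("privileged: %s\n", geteuid() == 0 ? "yes" : "no");
        return 0;
}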
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 09afe5d87f2b..4e8fe2c7b054 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -361,7 +361,7 @@ unsigned int sysfs_get_idlestate_count(unsigned int cpu)
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
- return -ENODEV;
+ return 0;
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 33a5c06d95ca..d273624c93a6 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -179,11 +179,11 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
*/
fd = open(longpath, O_RDONLY);
if (fd > 0) {
- printf("Invoke copy of '%s' via filename of length %lu:\n",
+ printf("Invoke copy of '%s' via filename of length %zu:\n",
src, strlen(longpath));
fail += check_execveat(fd, "", AT_EMPTY_PATH);
} else {
- printf("Failed to open length %lu filename, errno=%d (%s)\n",
+ printf("Failed to open length %zu filename, errno=%d (%s)\n",
strlen(longpath), errno, strerror(errno));
fail++;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f5283438ee05..1cc6e2e19982 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -671,6 +671,7 @@ static void update_memslots(struct kvm_memslots *slots,
WARN_ON(mslots[i].id != id);
if (!new->npages) {
+ WARN_ON(!mslots[i].npages);
new->base_gfn = 0;
if (mslots[i].npages)
slots->used_slots--;
@@ -687,12 +688,25 @@ static void update_memslots(struct kvm_memslots *slots,
slots->id_to_index[mslots[i].id] = i;
i++;
}
- while (i > 0 &&
- new->base_gfn > mslots[i - 1].base_gfn) {
- mslots[i] = mslots[i - 1];
- slots->id_to_index[mslots[i].id] = i;
- i--;
- }
+
+ /*
+ * The ">=" is needed when creating a slot with base_gfn == 0,
+ * so that it moves before all those with base_gfn == npages == 0.
+ *
+ * On the other hand, if new->npages is zero, the above loop has
+ * already left i pointing to the beginning of the empty part of
+ * mslots, and the ">=" would move the hole backwards in this
+ * case---which is wrong. So skip the loop when deleting a slot.
+ */
+ if (new->npages) {
+ while (i > 0 &&
+ new->base_gfn >= mslots[i - 1].base_gfn) {
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i--;
+ }
+ } else
+ WARN_ON_ONCE(i != slots->used_slots);
mslots[i] = *new;
slots->id_to_index[mslots[i].id] = i;