author    Dave Airlie <airlied@redhat.com>  2017-03-23 12:05:13 +1000
committer Dave Airlie <airlied@redhat.com>  2017-03-23 12:05:13 +1000
commit    65d1086c44791112188f6aebbdc3a27cab3736d3 (patch)
tree      f769c133e61c54e34e91aa9ecf1d3504a6eb4fb4
parent    edd849e5448c4f6ddc04a5fa1ac5479176660c27 (diff)
parent    97da3854c526d3a6ee05c849c96e48d21527606c (diff)

BackMerge tag 'v4.11-rc3' into drm-next

Linux 4.11-rc3 as requested by Daniel
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt9
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/cgroup-v2.txt11
-rw-r--r--Documentation/dev-tools/kcov.rst2
-rw-r--r--Documentation/devicetree/bindings/powerpc/4xx/emac.txt62
-rw-r--r--Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usb251xb.txt53
-rw-r--r--Documentation/networking/ip-sysctl.txt3
-rw-r--r--Documentation/trace/kprobetrace.txt2
-rw-r--r--Documentation/trace/uprobetracer.txt2
-rw-r--r--Documentation/virtual/kvm/api.txt4
-rw-r--r--Documentation/vm/userfaultfd.txt4
-rw-r--r--MAINTAINERS1
-rw-r--r--Makefile2
-rw-r--r--arch/arc/include/asm/hugepage.h1
-rw-r--r--arch/arc/include/asm/pgtable.h1
-rw-r--r--arch/arm/include/asm/kvm_arm.h1
-rw-r--r--arch/arm/include/asm/kvm_host.h1
-rw-r--r--arch/arm/include/asm/pgtable.h1
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/kvm/handle_exit.c19
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm/xen/mm.c2
-rw-r--r--arch/arm64/Kconfig14
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/pgtable-types.h4
-rw-r--r--arch/arm64/kernel/cpuidle.c2
-rw-r--r--arch/arm64/kernel/probes/kprobes.c6
-rw-r--r--arch/arm64/kvm/handle_exit.c19
-rw-r--r--arch/arm64/kvm/hyp/tlb.c64
-rw-r--r--arch/arm64/mm/kasan_init.c2
-rw-r--r--arch/avr32/include/asm/pgtable-2level.h1
-rw-r--r--arch/avr32/oprofile/backtrace.c2
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c2
-rw-r--r--arch/cris/include/asm/pgtable.h1
-rw-r--r--arch/frv/include/asm/pgtable.h1
-rw-r--r--arch/h8300/include/asm/pgtable.h1
-rw-r--r--arch/h8300/kernel/ptrace_h.c2
-rw-r--r--arch/hexagon/include/asm/pgtable.h1
-rw-r--r--arch/ia64/include/asm/pgtable.h2
-rw-r--r--arch/metag/include/asm/pgtable.h1
-rw-r--r--arch/microblaze/include/asm/page.h3
-rw-r--r--arch/mips/cavium-octeon/cpu.c2
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-crypto.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c1
-rw-r--r--arch/mips/include/asm/fpu.h1
-rw-r--r--arch/mips/include/asm/pgtable-32.h1
-rw-r--r--arch/mips/include/asm/pgtable-64.h1
-rw-r--r--arch/mips/kernel/smp-bmips.c1
-rw-r--r--arch/mips/kernel/smp-mt.c1
-rw-r--r--arch/mips/loongson64/loongson-3/cop2-ex.c1
-rw-r--r--arch/mips/netlogic/common/smp.c1
-rw-r--r--arch/mips/netlogic/xlp/cop2-ex.c3
-rw-r--r--arch/mips/sgi-ip22/ip28-berr.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c3
-rw-r--r--arch/mips/sgi-ip32/ip32-berr.c1
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c1
-rw-r--r--arch/mn10300/include/asm/page.h1
-rw-r--r--arch/nios2/include/asm/pgtable.h1
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h8
-rw-r--r--arch/openrisc/include/asm/pgtable.h1
-rw-r--r--arch/openrisc/include/asm/uaccess.h2
-rw-r--r--arch/openrisc/kernel/or32_ksyms.c4
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/parisc/include/asm/cacheflush.h23
-rw-r--r--arch/parisc/include/asm/uaccess.h3
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/cache.c22
-rw-r--r--arch/parisc/kernel/module.c8
-rw-r--r--arch/parisc/kernel/perf.c94
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/Kconfig138
-rw-r--r--arch/powerpc/Makefile11
-rw-r--r--arch/powerpc/boot/zImage.lds.S1
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_glue.c2
-rw-r--r--arch/powerpc/include/asm/bitops.h4
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h88
-rw-r--r--arch/powerpc/include/asm/checksum.h2
-rw-r--r--arch/powerpc/include/asm/cpuidle.h4
-rw-r--r--arch/powerpc/include/asm/elf.h4
-rw-r--r--arch/powerpc/include/asm/mce.h108
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-4k.h3
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-64k.h1
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h7
-rw-r--r--arch/powerpc/include/asm/prom.h18
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/cputable.c3
-rw-r--r--arch/powerpc/kernel/idle_book3s.S10
-rw-r--r--arch/powerpc/kernel/mce.c88
-rw-r--r--arch/powerpc/kernel/mce_power.c237
-rw-r--r--arch/powerpc/kernel/prom_init.c120
-rw-r--r--arch/powerpc/kernel/setup_64.c5
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c2
-rw-r--r--arch/powerpc/lib/Makefile1
-rw-r--r--arch/powerpc/lib/sstep.c20
-rw-r--r--arch/powerpc/lib/test_emulate_step.c434
-rw-r--r--arch/powerpc/mm/init_64.c36
-rw-r--r--arch/powerpc/mm/pgtable-radix.c4
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/isa207-common.c43
-rw-r--r--arch/powerpc/perf/isa207-common.h1
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S4
-rw-r--r--arch/powerpc/platforms/powernv/opal.c21
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c20
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c4
-rw-r--r--arch/powerpc/purgatory/trampoline.S12
-rw-r--r--arch/powerpc/sysdev/axonram.c5
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c10
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c17
-rw-r--r--arch/s390/configs/default_defconfig2
-rw-r--r--arch/s390/configs/gcov_defconfig2
-rw-r--r--arch/s390/configs/performance_defconfig2
-rw-r--r--arch/s390/crypto/paes_s390.c5
-rw-r--r--arch/s390/defconfig2
-rw-r--r--arch/s390/include/asm/cputime.h20
-rw-r--r--arch/s390/include/asm/pgtable.h1
-rw-r--r--arch/s390/include/asm/timex.h12
-rw-r--r--arch/s390/include/uapi/asm/unistd.h4
-rw-r--r--arch/s390/kernel/compat_wrapper.c1
-rw-r--r--arch/s390/kernel/entry.S10
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/process.c3
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/mm/pgtable.c19
-rw-r--r--arch/score/include/asm/pgtable.h1
-rw-r--r--arch/score/kernel/traps.c1
-rw-r--r--arch/score/mm/extable.c2
-rw-r--r--arch/sh/boards/mach-cayman/setup.c2
-rw-r--r--arch/sh/include/asm/pgtable-2level.h1
-rw-r--r--arch/sh/include/asm/pgtable-3level.h1
-rw-r--r--arch/sparc/include/asm/pgtable_64.h1
-rw-r--r--arch/tile/include/asm/pgtable_32.h1
-rw-r--r--arch/tile/include/asm/pgtable_64.h1
-rw-r--r--arch/um/include/asm/pgtable-2level.h1
-rw-r--r--arch/um/include/asm/pgtable-3level.h1
-rw-r--r--arch/unicore32/include/asm/pgtable.h1
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/events/amd/core.c2
-rw-r--r--arch/x86/events/core.c16
-rw-r--r--arch/x86/events/intel/cstate.c2
-rw-r--r--arch/x86/events/intel/rapl.c2
-rw-r--r--arch/x86/events/intel/uncore.h6
-rw-r--r--arch/x86/hyperv/hv_init.c2
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/pgtable-3level.h3
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/pgtable_types.h4
-rw-r--r--arch/x86/include/asm/pkeys.h15
-rw-r--r--arch/x86/include/asm/purgatory.h20
-rw-r--r--arch/x86/include/asm/tlbflush.h2
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c9
-rw-r--r--arch/x86/kernel/apic/apic.c49
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/centaur.c2
-rw-r--r--arch/x86/kernel/cpu/common.c3
-rw-r--r--arch/x86/kernel/cpu/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c4
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c3
-rw-r--r--arch/x86/kernel/cpu/transmeta.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c1
-rw-r--r--arch/x86/kernel/ftrace.c2
-rw-r--r--arch/x86/kernel/head64.c1
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/kdebugfs.c2
-rw-r--r--arch/x86/kernel/kprobes/common.h2
-rw-r--r--arch/x86/kernel/kprobes/core.c6
-rw-r--r--arch/x86/kernel/kprobes/opt.c2
-rw-r--r--arch/x86/kernel/machine_kexec_64.c9
-rw-r--r--arch/x86/kernel/nmi.c6
-rw-r--r--arch/x86/kernel/reboot.c16
-rw-r--r--arch/x86/kernel/tsc.c37
-rw-r--r--arch/x86/kernel/unwind_frame.c36
-rw-r--r--arch/x86/kvm/vmx.c30
-rw-r--r--arch/x86/mm/gup.c37
-rw-r--r--arch/x86/mm/kasan_init_64.c1
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/pci/common.c9
-rw-r--r--arch/x86/pci/xen.c23
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile1
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c82
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c2
-rw-r--r--arch/x86/platform/intel-mid/mfld.c15
-rw-r--r--arch/x86/platform/uv/tlb_uv.c1
-rw-r--r--arch/x86/purgatory/purgatory.c36
-rw-r--r--arch/x86/purgatory/setup-x86_64.S1
-rw-r--r--arch/x86/purgatory/sha256.h1
-rw-r--r--arch/xtensa/include/asm/pgtable.h1
-rw-r--r--block/bio.c12
-rw-r--r--block/blk-core.c41
-rw-r--r--block/blk-mq-sysfs.c40
-rw-r--r--block/blk-mq-tag.c3
-rw-r--r--block/blk-mq.c37
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/genhd.c37
-rw-r--r--block/sed-opal.c10
-rw-r--r--crypto/af_alg.c9
-rw-r--r--crypto/algif_hash.c9
-rw-r--r--drivers/acpi/acpi_processor.c57
-rw-r--r--drivers/acpi/bus.c1
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/ioapic.c22
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/processor_core.c133
-rw-r--r--drivers/ata/ahci_qoriq.c6
-rw-r--r--drivers/ata/libata-sff.c1
-rw-r--r--drivers/ata/libata-transport.c9
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/block/paride/pg.c2
-rw-r--r--drivers/block/paride/pt.c2
-rw-r--r--drivers/block/rbd.c16
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c16
-rw-r--r--drivers/char/nwbutton.c2
-rw-r--r--drivers/char/random.c129
-rw-r--r--drivers/clocksource/tcb_clksrc.c16
-rw-r--r--drivers/cpufreq/cpufreq.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c131
-rw-r--r--drivers/crypto/s5p-sss.c132
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c2
-rw-r--r--drivers/dax/dax.c33
-rw-r--r--drivers/firmware/efi/arm-runtime.c1
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c4
-rw-r--r--drivers/gpu/drm/amd/acp/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c18
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h1
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c57
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c139
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h20
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c40
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h12
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c439
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c66
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c52
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c72
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c49
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_regs.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c8
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c37
-rw-r--r--drivers/hv/channel.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c27
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c28
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c3
-rw-r--r--drivers/i2c/busses/i2c-meson.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c9
-rw-r--r--drivers/i2c/busses/i2c-riic.c6
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/irqchip/irq-crossbar.c9
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c16
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c3
-rw-r--r--drivers/isdn/hisax/st5481_b.c2
-rw-r--r--drivers/macintosh/macio_asic.c1
-rw-r--r--drivers/md/bcache/util.h1
-rw-r--r--drivers/md/dm.c29
-rw-r--r--drivers/md/md-cluster.c2
-rw-r--r--drivers/md/md.c27
-rw-r--r--drivers/md/md.h6
-rw-r--r--drivers/md/raid1.c29
-rw-r--r--drivers/md/raid10.c44
-rw-r--r--drivers/md/raid5.c5
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h8
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c33
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/rc/nuvoton-cir.c5
-rw-r--r--drivers/media/rc/rc-main.c26
-rw-r--r--drivers/media/rc/serial_ir.c123
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c244
-rw-r--r--drivers/misc/sgi-gru/grufault.c9
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c206
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c110
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c104
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c17
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h42
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h43
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c184
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c64
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c25
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c43
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c47
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c8
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/spi_ks8995.c3
-rw-r--r--drivers/net/team/team.c1
-rw-r--r--drivers/net/tun.c19
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c73
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c4
-rw-r--r--drivers/net/wimax/i2400m/usb.c3
-rw-r--r--drivers/net/xen-netback/interface.c26
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c20
-rw-r--r--drivers/pci/dwc/pci-exynos.c8
-rw-r--r--drivers/pci/pcie/aspm.c5
-rw-r--r--drivers/pci/quirks.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c15
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c12
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c49
-rw-r--r--drivers/platform/x86/asus-wmi.c22
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c451
-rw-r--r--drivers/scsi/Kconfig19
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c2
-rw-r--r--drivers/scsi/libiscsi.c26
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c128
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c107
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c68
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c19
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h13
-rw-r--r--drivers/scsi/qedf/qedf_fip.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c4
-rw-r--r--drivers/scsi/qedf/qedf_main.c4
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c16
-rw-r--r--drivers/scsi/qedi/qedi_fw.c4
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h8
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/Kconfig1
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h56
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c107
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h18
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c304
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c748
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h39
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c49
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/sd.c58
-rw-r--r--drivers/scsi/storvsc_drv.c27
-rw-r--r--drivers/scsi/ufs/ufs.h22
-rw-r--r--drivers/scsi/ufs/ufshcd.c231
-rw-r--r--drivers/scsi/ufs/ufshcd.h15
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c1
-rw-r--r--drivers/staging/vc04_services/Kconfig1
-rw-r--r--drivers/target/target_core_alua.c82
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_pscsi.c50
-rw-r--r--drivers/target/target_core_sbc.c10
-rw-r--r--drivers/target/target_core_tpg.c3
-rw-r--r--drivers/target/target_core_transport.c3
-rw-r--r--drivers/target/target_core_user.c152
-rw-r--r--drivers/tty/n_hdlc.c132
-rw-r--r--drivers/tty/serial/samsung.c6
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c3
-rw-r--r--drivers/usb/dwc3/gadget.c76
-rw-r--r--drivers/usb/dwc3/gadget.h14
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c17
-rw-r--r--drivers/usb/gadget/function/f_uvc.c7
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c25
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c5
-rw-r--r--drivers/usb/host/ohci-at91.c4
-rw-r--r--drivers/usb/host/xhci-dbg.c2
-rw-r--r--drivers/usb/host/xhci-mtk.c7
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/usb/host/xhci-tegra.c1
-rw-r--r--drivers/usb/host/xhci.c4
-rw-r--r--drivers/usb/misc/iowarrior.c21
-rw-r--r--drivers/usb/misc/usb251xb.c59
-rw-r--r--drivers/usb/phy/phy-isp1301.c7
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/io_ti.c8
-rw-r--r--drivers/usb/serial/omninet.c13
-rw-r--r--drivers/usb/serial/safe_serial.c5
-rw-r--r--drivers/usb/storage/unusual_devs.h14
-rw-r--r--drivers/xen/gntdev.c11
-rw-r--r--drivers/xen/swiotlb-xen.c47
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c1
-rw-r--r--fs/afs/callback.c7
-rw-r--r--fs/afs/cmservice.c11
-rw-r--r--fs/afs/file.c20
-rw-r--r--fs/afs/fsclient.c77
-rw-r--r--fs/afs/inode.c42
-rw-r--r--fs/afs/internal.h23
-rw-r--r--fs/afs/misc.c2
-rw-r--r--fs/afs/mntpt.c53
-rw-r--r--fs/afs/rxrpc.c149
-rw-r--r--fs/afs/security.c9
-rw-r--r--fs/afs/server.c6
-rw-r--r--fs/afs/vlocation.c16
-rw-r--r--fs/afs/write.c76
-rw-r--r--fs/dlm/lowcomms.c2
-rw-r--r--fs/fat/inode.c13
-rw-r--r--fs/fs-writeback.c35
-rw-r--r--fs/gfs2/incore.h2
-rw-r--r--fs/iomap.c17
-rw-r--r--fs/nfs/callback.c4
-rw-r--r--fs/nfs/client.c25
-rw-r--r--fs/nfs/filelayout/filelayoutdev.c8
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h14
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c5
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs4client.c4
-rw-r--r--fs/nfs/nfs4proc.c11
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/pnfs.h2
-rw-r--r--fs/nfs/pnfs_nfs.c31
-rw-r--r--fs/nfs/write.c6
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/overlayfs/util.c1
-rw-r--r--fs/timerfd.c8
-rw-r--r--fs/userfaultfd.c75
-rw-r--r--fs/xfs/kmem.c18
-rw-r--r--fs/xfs/kmem.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c34
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c87
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c26
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h2
-rw-r--r--fs/xfs/xfs_aops.c59
-rw-r--r--fs/xfs/xfs_dir2_readdir.c11
-rw-r--r--fs/xfs/xfs_icache.c2
-rw-r--r--fs/xfs/xfs_inode.c14
-rw-r--r--fs/xfs/xfs_iomap.c25
-rw-r--r--fs/xfs/xfs_itable.c6
-rw-r--r--fs/xfs/xfs_mount.c3
-rw-r--r--fs/xfs/xfs_reflink.c23
-rw-r--r--fs/xfs/xfs_reflink.h4
-rw-r--r--fs/xfs/xfs_super.c2
-rw-r--r--include/asm-generic/4level-fixup.h3
-rw-r--r--include/asm-generic/5level-fixup.h41
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h62
-rw-r--r--include/asm-generic/pgtable-nop4d.h56
-rw-r--r--include/asm-generic/pgtable-nopud.h48
-rw-r--r--include/asm-generic/pgtable.h48
-rw-r--r--include/asm-generic/tlb.h14
-rw-r--r--include/crypto/if_alg.h2
-rw-r--r--include/dt-bindings/sound/cs42l42.h2
-rw-r--r--include/linux/acpi.h5
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/dccp.h1
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/filter.h16
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genhd.h8
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/irqdomain.h4
-rw-r--r--include/linux/jump_label.h11
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/list_nulls.h5
-rw-r--r--include/linux/mm.h34
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/phy.h4
-rw-r--r--include/linux/purgatory.h23
-rw-r--r--include/linux/random.h18
-rw-r--r--include/linux/rculist_nulls.h14
-rw-r--r--include/linux/regulator/machine.h2
-rw-r--r--include/linux/user_namespace.h2
-rw-r--r--include/linux/userfaultfd_k.h13
-rw-r--r--include/linux/vm_event_item.h3
-rw-r--r--include/linux/wait.h31
-rw-r--r--include/media/vsp1.h13
-rw-r--r--include/net/inet_common.h3
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/irda/timer.h2
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/net/sock.h9
-rw-r--r--include/scsi/libiscsi.h1
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/target/target_core_backend.h7
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/trace/events/syscalls.h1
-rw-r--r--include/uapi/drm/omap_drm.h38
-rw-r--r--include/uapi/linux/packet_diag.h2
-rw-r--r--include/uapi/linux/userfaultfd.h5
-rw-r--r--include/xen/swiotlb-xen.h11
-rw-r--r--init/main.c1
-rw-r--r--kernel/bpf/hashtab.c119
-rw-r--r--kernel/bpf/lpm_trie.c6
-rw-r--r--kernel/cgroup/cgroup-v1.c2
-rw-r--r--kernel/cgroup/cgroup.c2
-rw-r--r--kernel/cgroup/pids.c2
-rw-r--r--kernel/cpu.c28
-rw-r--r--kernel/events/core.c66
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/futex.c22
-rw-r--r--kernel/kexec_file.c8
-rw-r--r--kernel/kexec_internal.h6
-rw-r--r--kernel/locking/lockdep.c11
-rw-r--r--kernel/locking/rwsem-spinlock.c16
-rw-r--r--kernel/locking/test-ww_mutex.c6
-rw-r--r--kernel/memremap.c4
-rw-r--r--kernel/sched/core.c11
-rw-r--r--kernel/sched/cpufreq_schedutil.c19
-rw-r--r--kernel/sched/deadline.c63
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/sched/features.h5
-rw-r--r--kernel/sched/loadavg.c20
-rw-r--r--kernel/sched/wait.c39
-rw-r--r--kernel/time/jiffies.c2
-rw-r--r--kernel/trace/Kconfig6
-rw-r--r--kernel/trace/Makefile4
-rw-r--r--kernel/trace/ftrace.c23
-rw-r--r--kernel/trace/trace.c10
-rw-r--r--kernel/trace/trace_probe.h4
-rw-r--r--kernel/trace/trace_stack.c2
-rw-r--r--kernel/ucount.c18
-rw-r--r--kernel/workqueue.c1
-rw-r--r--lib/ioremap.c39
-rw-r--r--lib/radix-tree.c4
-rw-r--r--lib/refcount.c14
-rw-r--r--mm/backing-dev.c43
-rw-r--r--mm/gup.c46
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/hugetlb.c29
-rw-r--r--mm/kasan/kasan_init.c44
-rw-r--r--mm/kasan/quarantine.c51
-rw-r--r--mm/madvise.c44
-rw-r--r--mm/memblock.c5
-rw-r--r--mm/memcontrol.c18
-rw-r--r--mm/memory.c230
-rw-r--r--mm/memory_hotplug.c6
-rw-r--r--mm/mlock.c10
-rw-r--r--mm/mprotect.c26
-rw-r--r--mm/mremap.c13
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/page_vma_mapped.c6
-rw-r--r--mm/pagewalk.c32
-rw-r--r--mm/percpu-vm.c7
-rw-r--r--mm/percpu.c5
-rw-r--r--mm/pgtable-generic.c6
-rw-r--r--mm/rmap.c20
-rw-r--r--mm/sparse-vmemmap.c22
-rw-r--r--mm/swap_slots.c2
-rw-r--r--mm/swapfile.c26
-rw-r--r--mm/userfaultfd.c23
-rw-r--r--mm/vmalloc.c84
-rw-r--r--mm/vmstat.c3
-rw-r--r--mm/z3fold.c1
-rw-r--r--net/atm/svc.c5
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/bluetooth/l2cap_sock.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c3
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_input.c1
-rw-r--r--net/bridge/br_netfilter_hooks.c21
-rw-r--r--net/ceph/ceph_common.c15
-rw-r--r--net/ceph/osd_client.c36
-rw-r--r--net/ceph/osdmap.c4
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/net-sysfs.c6
-rw-r--r--net/core/skbuff.c30
-rw-r--r--net/core/sock.c106
-rw-r--r--net/dccp/ccids/ccid2.c1
-rw-r--r--net/dccp/ipv4.c3
-rw-r--r--net/dccp/ipv6.c8
-rw-r--r--net/dccp/minisocks.c24
-rw-r--r--net/decnet/af_decnet.c5
-rw-r--r--net/ipv4/af_inet.c9
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/tcp_ipv4.c10
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv6/af_inet6.c10
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_output.c9
-rw-r--r--net/ipv6/ip6_vti.c8
-rw-r--r--net/ipv6/route.c11
-rw-r--r--net/ipv6/tcp_ipv6.c8
-rw-r--r--net/irda/af_irda.c5
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mpls/af_mpls.c4
-rw-r--r--net/netrom/af_netrom.c3
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/phonet/pep.c6
-rw-r--r--net/phonet/socket.c4
-rw-r--r--net/rds/connection.c1
-rw-r--r--net/rds/ib_cm.c47
-rw-r--r--net/rds/rds.h6
-rw-r--r--net/rds/tcp.c38
-rw-r--r--net/rds/tcp.h2
-rw-r--r--net/rds/tcp_listen.c11
-rw-r--r--net/rose/af_rose.c3
-rw-r--r--net/rxrpc/input.c27
-rw-r--r--net/rxrpc/recvmsg.c4
-rw-r--r--net/rxrpc/sendmsg.c49
-rw-r--r--net/sched/act_connmark.c3
-rw-r--r--net/sched/act_skbmod.c1
-rw-r--r--net/sctp/ipv6.c5
-rw-r--r--net/sctp/protocol.c5
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/smc/af_smc.c2
-rw-r--r--net/socket.c5
-rw-r--r--net/sunrpc/xprtrdma/verbs.c3
-rw-r--r--net/tipc/socket.c8
-rw-r--r--net/unix/af_unix.c5
-rw-r--r--net/vmw_vsock/af_vsock.c3
-rw-r--r--net/x25/af_x25.c3
-rw-r--r--net/xfrm/xfrm_policy.c19
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c2
-rw-r--r--scripts/module-common.lds2
-rw-r--r--scripts/spelling.txt3
-rw-r--r--sound/soc/amd/acp-pcm-dma.c2
-rw-r--r--tools/include/uapi/linux/bpf_perf_event.h18
-rw-r--r--tools/lguest/lguest.c2
-rw-r--r--tools/lib/bpf/Makefile2
-rw-r--r--tools/lib/traceevent/Makefile2
-rw-r--r--tools/lib/traceevent/event-parse.h2
-rw-r--r--tools/objtool/builtin-check.c15
-rw-r--r--tools/objtool/elf.c12
-rw-r--r--tools/objtool/elf.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c2
-rw-r--r--tools/perf/util/symbol.c2
-rwxr-xr-xtools/testing/ktest/ktest.pl21
-rw-r--r--tools/testing/radix-tree/Makefile15
-rw-r--r--tools/testing/radix-tree/benchmark.c173
-rw-r--r--tools/testing/radix-tree/idr-test.c78
-rw-r--r--tools/testing/radix-tree/main.c1
-rw-r--r--tools/testing/radix-tree/tag_check.c29
-rw-r--r--tools/testing/radix-tree/test.h1
-rw-r--r--tools/testing/selftests/bpf/Makefile4
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c4
-rw-r--r--tools/testing/selftests/powerpc/harness.c6
-rw-r--r--tools/testing/selftests/powerpc/include/vsx_asm.h48
-rw-r--r--tools/testing/selftests/vm/Makefile4
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c2
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c16
-rw-r--r--tools/testing/selftests/x86/ptrace_syscall.c3
-rw-r--r--tools/testing/selftests/x86/single_step_syscall.c5
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c109
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c32
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c5
726 files changed, 10240 insertions, 4769 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 986e44387dad..2ba45caabada 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -653,6 +653,9 @@
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system
+ cpufreq.off=1 [CPU_FREQ]
+ disable the cpufreq sub-system
+
cpu_init_udelay=N
[X86] Delay for N microsec between assert and de-assert
of APIC INIT to start processors. This delay occurs
@@ -1183,6 +1186,12 @@
functions that can be changed at run time by the
set_graph_notrace file in the debugfs tracing directory.
+ ftrace_graph_max_depth=<uint>
+ [FTRACE] Used with the function graph tracer. This is
+ the max depth it will trace into a function. This value
+ can be changed at run time by the max_graph_depth file
+ in the tracefs tracing directory. default: 0 (no limit)
+
gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)
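As a concrete illustration of the run-time knob described in the ftrace_graph_max_depth hunk above, here is a minimal C sketch that adjusts the depth through the tracefs file the text mentions. It assumes tracefs is mounted at /sys/kernel/tracing and that the function graph tracer is active; error handling is trimmed to the essentials.

    #include <stdio.h>

    int main(void)
    {
        /* Path assumes tracefs mounted at /sys/kernel/tracing. */
        FILE *f = fopen("/sys/kernel/tracing/max_graph_depth", "w");

        if (!f) {
            perror("max_graph_depth");
            return 1;
        }
        fprintf(f, "3\n");  /* trace at most 3 call levels; 0 means no limit */
        fclose(f);
        return 0;
    }
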
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index a71b8095dbd8..2f66683500b8 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -68,3 +68,4 @@ stable kernels.
| | | | |
| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
+| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 3b8449f8ac7e..49d7c997fa1e 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -1142,16 +1142,17 @@ used by the kernel.
pids.max
- A read-write single value file which exists on non-root cgroups. The
- default is "max".
+ A read-write single value file which exists on non-root
+ cgroups. The default is "max".
- Hard limit of number of processes.
+ Hard limit of number of processes.
pids.current
- A read-only single value file which exists on all cgroups.
+ A read-only single value file which exists on all cgroups.
- The number of processes currently in the cgroup and its descendants.
+ The number of processes currently in the cgroup and its
+ descendants.
Organisational operations are not blocked by cgroup policies, so it is
possible to have pids.current > pids.max. This can be done by either
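To make the pids interface semantics above concrete, a hedged C sketch follows; the cgroup path is an assumption (cgroup2 mounted at /sys/fs/cgroup with an existing child cgroup named "test").

    #include <stdio.h>

    int main(void)
    {
        char buf[32];
        FILE *f;

        /* Hard-limit the (assumed) cgroup to 64 processes. */
        f = fopen("/sys/fs/cgroup/test/pids.max", "w");
        if (!f)
            return 1;
        fprintf(f, "64\n");  /* or "max" for no limit, the default */
        fclose(f);

        /* Read back how many processes the cgroup currently contains;
         * note this may legitimately exceed pids.max, as explained above. */
        f = fopen("/sys/fs/cgroup/test/pids.current", "r");
        if (f && fgets(buf, sizeof(buf), f))
            printf("pids.current: %s", buf);
        if (f)
            fclose(f);
        return 0;
    }
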
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 2c41b713841f..44886c91e112 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
to collect more or less stable coverage that is function of syscall inputs.
To achieve this goal it does not collect coverage in soft/hard interrupts
and instrumentation of some inherently non-deterministic parts of kernel is
-disbled (e.g. scheduler, locking).
+disabled (e.g. scheduler, locking).
Usage
-----
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
index 712baf6c3e24..44b842b6ca15 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
@@ -71,6 +71,9 @@
For Axon it can be absent, though my current driver
doesn't handle phy-address yet so for now, keep
0x00ffffff in it.
+ - phy-handle : Used to describe configurations where an external PHY

+ is used. Please refer to:
+ Documentation/devicetree/bindings/net/ethernet.txt
- rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
operations (if absent the value is the same as
rx-fifo-size). For Axon, either absent or 2048.
@@ -81,8 +84,22 @@
offload, phandle of the TAH device node.
- tah-channel : 1 cell, optional. If appropriate, channel used on the
TAH engine.
+ - fixed-link : Fixed-link subnode describing a link to a non-MDIO
+ managed entity. See
+ Documentation/devicetree/bindings/net/fixed-link.txt
+ for details.
+ - mdio subnode : When the EMAC has a phy connected to its local
+ mdio, which is supported by the kernel's network
+ PHY library in drivers/net/phy, there must be a device
+ tree subnode with the following required properties:
+ - #address-cells: Must be <1>.
+ - #size-cells: Must be <0>.
- Example:
+ For PHY definitions: Please refer to
+ Documentation/devicetree/bindings/net/phy.txt and
+ Documentation/devicetree/bindings/net/ethernet.txt
+
+ Examples:
EMAC0: ethernet@40000800 {
device_type = "network";
@@ -104,6 +121,48 @@
zmii-channel = <0>;
};
+ EMAC1: ethernet@ef600c00 {
+ device_type = "network";
+ compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+ interrupt-parent = <&EMAC1>;
+ interrupts = <0 1>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+ 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+ reg = <0xef600c00 0x000000c4>;
+ local-mac-address = [000000000000]; /* Filled in by U-Boot */
+ mal-device = <&MAL0>;
+ mal-tx-channel = <0>;
+ mal-rx-channel = <0>;
+ cell-index = <0>;
+ max-frame-size = <9000>;
+ rx-fifo-size = <16384>;
+ tx-fifo-size = <2048>;
+ fifo-entry-size = <10>;
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ phy-map = <0x00000000>;
+ rgmii-device = <&RGMII0>;
+ rgmii-channel = <0>;
+ tah-device = <&TAH0>;
+ tah-channel = <0>;
+ has-inverted-stacr-oc;
+ has-new-stacr-staopc;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+ };
+
+
ii) McMAL node
Required properties:
@@ -145,4 +204,3 @@
- revision : as provided by the RGMII new version register if
available.
For Axon: 0x0000012a
-
diff --git a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
index c3f6546ebac7..6a23ad9ac53a 100644
--- a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
@@ -45,7 +45,7 @@ Required Properties:
Optional Properties:
- reg-names: In addition to the required properties, the following are optional
- "efuse-address" - Contains efuse base address used to pick up ABB info.
- - "ldo-address" - Contains address of ABB LDO overide register address.
+ - "ldo-address" - Contains address of ABB LDO override register.
"efuse-address" is required for this.
- ti,ldovbb-vset-mask - Required if ldo-address is set, mask for LDO override
register to provide override vset value.
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 0c065f77658f..3957d4edaa74 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -7,18 +7,18 @@ Required properties :
- compatible : Should be "microchip,usb251xb" or one of the specific types:
"microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
"microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi"
- - hub-reset-gpios : Should specify the gpio for hub reset
+ - reset-gpios : Should specify the gpio for hub reset
+ - reg : I2C address on the selected bus (default is <0x2C>)
Optional properties :
- - reg : I2C address on the selected bus (default is <0x2C>)
- skip-config : Skip Hub configuration, but only send the USB-Attach command
- - vendor-id : USB Vendor ID of the hub (16 bit, default is 0x0424)
- - product-id : USB Product ID of the hub (16 bit, default depends on type)
- - device-id : USB Device ID of the hub (16 bit, default is 0x0bb3)
- - language-id : USB Language ID (16 bit, default is 0x0000)
- - manufacturer : USB Manufacturer string (max 31 characters long)
- - product : USB Product string (max 31 characters long)
- - serial : USB Serial string (max 31 characters long)
+ - vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
+ - product-id : Set USB Product ID of the hub (16 bit, default depends on type)
+ - device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3)
+ - language-id : Set USB Language ID (16 bit, default is 0x0000)
+ - manufacturer : Set USB Manufacturer string (max 31 characters long)
+ - product : Set USB Product string (max 31 characters long)
+ - serial : Set USB Serial string (max 31 characters long)
- {bus,self}-powered : selects between self- and bus-powered operation (default
is self-powered)
- disable-hi-speed : disable USB Hi-Speed support
@@ -31,8 +31,10 @@ Optional properties :
(default is individual)
- dynamic-power-switching : enable auto-switching from self- to bus-powered
operation if the local power source is removed or unavailable
- - oc-delay-{100us,4ms,8ms,16ms} : set over current timer delay (default is 8ms)
- - compound-device : indicated the hub is part of a compound device
+ - oc-delay-us : Delay time (in microseconds) for filtering the over-current
+ sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If
+ an invalid value is given, the default is used instead.
+ - compound-device : indicate the hub is part of a compound device
- port-mapping-mode : enable port mapping mode
- string-support : enable string descriptor support (required for manufacturer,
product and serial string configuration)
@@ -40,34 +42,15 @@ Optional properties :
device connected.
- sp-disabled-ports : Specifies the ports which will be self-power disabled
- bp-disabled-ports : Specifies the ports which will be bus-power disabled
- - max-sp-power : Specifies the maximum current the hub consumes from an
- upstream port when operating as self-powered hub including the power
- consumption of a permanently attached peripheral if the hub is
- configured as a compound device. The value is given in mA in a 0 - 500
- range (default is 2).
- - max-bp-power : Specifies the maximum current the hub consumes from an
- upstream port when operating as bus-powered hub including the power
- consumption of a permanently attached peripheral if the hub is
- configured as a compound device. The value is given in mA in a 0 - 500
- range (default is 100).
- - max-sp-current : Specifies the maximum current the hub consumes from an
- upstream port when operating as self-powered hub EXCLUDING the power
- consumption of a permanently attached peripheral if the hub is
- configured as a compound device. The value is given in mA in a 0 - 500
- range (default is 2).
- - max-bp-current : Specifies the maximum current the hub consumes from an
- upstream port when operating as bus-powered hub EXCLUDING the power
- consumption of a permanently attached peripheral if the hub is
- configured as a compound device. The value is given in mA in a 0 - 500
- range (default is 100).
- - power-on-time : Specifies the time it takes from the time the host initiates
- the power-on sequence to a port until the port has adequate power. The
- value is given in ms in a 0 - 510 range (default is 100ms).
+ - power-on-time-ms : Specifies the time it takes from the time the host
+ initiates the power-on sequence to a port until the port has adequate
+ power. The value is given in ms in a 0 - 510 range (default is 100ms).
Examples:
usb2512b@2c {
compatible = "microchip,usb2512b";
- hub-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ reg = <0x2c>;
+ reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
};
usb2514b@2c {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index fc73eeb7b3b8..ab0230461377 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
FALSE (router)
forwarding - BOOLEAN
- Enable IP forwarding on this interface.
+ Enable IP forwarding on this interface. This controls whether packets
+ received _on_ this interface can be forwarded.
mc_forwarding - BOOLEAN
Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index e4991fb1eedc..41ef9d8efe95 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
functions). Unlike the Tracepoint based event, this can be added and removed
dynamically, on the fly.
-To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
Similar to the events tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via
diff --git a/Documentation/trace/uprobetracer.txt b/Documentation/trace/uprobetracer.txt
index fa7b680ee8a0..bf526a7c5559 100644
--- a/Documentation/trace/uprobetracer.txt
+++ b/Documentation/trace/uprobetracer.txt
@@ -7,7 +7,7 @@
Overview
--------
Uprobe based trace events are similar to kprobe based trace events.
-To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
Similar to the kprobe-event tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 069450938b79..3c248f772ae6 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
slot. When changing an existing slot, it may be moved in the guest
physical memory space, or its flags may be modified. It may not be
resized. Slots may not overlap in guest physical address space.
+Bits 0-15 of "slot" specifies the slot id and this value should be
+less than the maximum number of user memory slots supported per VM.
+The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture.
If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
specifies the address space which is being modified. They must be
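A hedged user-space sketch of the slot encoding documented above: query KVM_CAP_NR_MEMSLOTS, then pack the address-space id (bits 16-31) and slot id (bits 0-15) into the "slot" field. vm_fd, the guest physical range, and the host mapping are assumed to exist already; error handling is trimmed.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* vm_fd: from KVM_CREATE_VM; host_mem: an already-mapped buffer. */
    static int set_slot(int vm_fd, unsigned short as_id, unsigned short slot_id,
                        __u64 gpa, __u64 size, void *host_mem)
    {
        int max_slots = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);

        if (max_slots > 0 && slot_id >= max_slots)
            return -1;  /* beyond what this VM supports */

        struct kvm_userspace_memory_region region = {
            /* bits 16-31: address space id, bits 0-15: slot id */
            .slot            = ((__u32)as_id << 16) | slot_id,
            .guest_phys_addr = gpa,
            .memory_size     = size,
            .userspace_addr  = (__u64)(unsigned long)host_mem,
        };
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }
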
diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt
index 0e5543a920e5..bb2f945f87ab 100644
--- a/Documentation/vm/userfaultfd.txt
+++ b/Documentation/vm/userfaultfd.txt
@@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
manager has to explicitly enable these events by setting appropriate
bits in uffdio_api.features passed to UFFDIO_API ioctl:
-UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
-non-cooperative process. When the monitored process exits, the uffd
-manager will get UFFD_EVENT_EXIT.
-
UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
this feature is enabled, the userfaultfd context of the parent process
is duplicated into the newly created process. The manager receives
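For context, a minimal hedged sketch of the feature negotiation referred to above: the manager opts into fork events through uffdio_api.features when performing the UFFDIO_API handshake. Error handling is trimmed.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/userfaultfd.h>

    int main(void)
    {
        int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        struct uffdio_api api = {
            .api      = UFFD_API,
            .features = UFFD_FEATURE_EVENT_FORK, /* duplicate ctx on fork() */
        };

        if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0)
            return 1;
        /* api.features now reports what the kernel actually supports. */
        return 0;
    }
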
diff --git a/MAINTAINERS b/MAINTAINERS
index e88154f2226c..e03e6bcefac3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8321,7 +8321,6 @@ M: Richard Leitner <richard.leitner@skidata.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/misc/usb251xb.c
-F: include/linux/platform_data/usb251xb.h
F: Documentation/devicetree/bindings/usb/usb251xb.txt
MICROSOFT SURFACE PRO 3 BUTTON DRIVER
diff --git a/Makefile b/Makefile
index 165cf9783a5d..b2faa9319372 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = Fearless Coyote
# *DOCUMENTATION*
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 317ff773e1ca..b18fcb606908 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -11,6 +11,7 @@
#define _ASM_ARC_HUGEPAGE_H
#include <linux/types.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline pte_t pmd_pte(pmd_t pmd)
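The __ARCH_USE_5LEVEL_HACK define added above (and repeated across many architectures in this merge) opts into the generic headers that fold the new p4d page-table level away. A simplified, hypothetical sketch of that folding pattern, not the kernel headers themselves:

    /* The folded level is a thin wrapper over the level above it, and
     * its walker helpers compile down to casts and constants. */
    typedef struct { unsigned long pgd; } pgd_t;
    typedef struct { pgd_t pgd; } p4d_t;        /* same storage as a pgd */

    static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
    {
        (void)address;
        return (p4d_t *)pgd;                    /* walking the level is a cast */
    }

    static inline int p4d_none(p4d_t p4d)    { (void)p4d; return 0; }
    static inline int p4d_present(p4d_t p4d) { (void)p4d; return 1; }
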
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index e94ca72b974e..ee22d40afef4 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -37,6 +37,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <linux/const.h>
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089fb44dc..a3f0b3d50089 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -209,6 +209,7 @@
#define HSR_EC_IABT_HYP (0x21)
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
+#define HSR_EC_MAX (0x3f)
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index cc495d799c67..31ee468ce667 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -30,7 +30,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a8d656d9aec7..1c462381c225 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -20,6 +20,7 @@
#else
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c9a2103faeb9..96dba7cd8be7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_NR_MEMSLOTS:
+ r = KVM_USER_MEM_SLOTS;
+ break;
case KVM_CAP_MSI_DEVID:
if (!kvm)
r = -EINVAL;
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d1955e35..96af65a30d78 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+ kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+ hsr);
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
+ [0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
- if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
- !arm_exit_handlers[hsr_ec]) {
- kvm_err("Unknown exception class: hsr: %#08x\n",
- (unsigned int)kvm_vcpu_get_hsr(vcpu));
- BUG();
- }
-
return arm_exit_handlers[hsr_ec];
}
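The table initializer above relies on GNU C range designators so that every exception class gets the fallback handler first, with specific classes overriding it afterwards; lookups can then never hit a NULL pointer, which is what lets the patch drop the BUG() path. A standalone sketch of the pattern, with hypothetical names:

    typedef int (*exit_handler_fn)(int);

    static int handle_unknown(int ec) { return -ec; }  /* safe default */
    static int handle_wfi(int ec)     { (void)ec; return 0; }

    #define EC_MAX 0x3f
    #define EC_WFI 0x01

    /* GNU C: [first ... last] initializes the whole range; later
     * entries override earlier ones, so no slot is left NULL. */
    static exit_handler_fn handlers[EC_MAX + 1] = {
        [0 ... EC_MAX] = handle_unknown,
        [EC_WFI]       = handle_wfi,
    };
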
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 3c2cb5d5adfa..0bb0e9c6376c 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -411,3 +411,4 @@
394 common pkey_mprotect sys_pkey_mprotect
395 common pkey_alloc sys_pkey_alloc
396 common pkey_free sys_pkey_free
+397 common statx sys_statx
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index ce18c91b50a1..f0325d96b97a 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
.set_dma_mask = xen_swiotlb_set_dma_mask,
+ .mmap = xen_swiotlb_dma_mmap,
+ .get_sgtable = xen_swiotlb_get_sgtable,
};
int __init xen_mm_init(void)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a39029b5414e..3741859765cf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009
If unsure, say Y.
+config QCOM_QDF2400_ERRATUM_0065
+ bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+ default y
+ help
+ On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+ ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+ been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+ If unsure, say Y.
+
endmenu
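A loosely hedged sketch of how a workaround for this erratum can look in practice — not the actual irq-gic-v3-its change; the field layout follows the GIC architecture (GITS_TYPER.ITT_entry_size in bits [7:4], encoded as size minus one), everything else is hypothetical:

    #define GITS_TYPER_ITT_SIZE(typer)  ((((typer) >> 4) & 0xf) + 1)

    static unsigned int its_itt_entry_size(unsigned long long typer,
                                           int has_qdf2400_e0065)
    {
        if (has_qdf2400_e0065)
            return 16;  /* hardware reports 8 bytes; real size is 16 */
        return GITS_TYPER_ITT_SIZE(typer);
    }
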
@@ -1063,6 +1073,10 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
+config KEYS_COMPAT
+ def_bool y
+ depends on COMPAT && KEYS
+
endmenu
menu "Power management options"
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 05310ad8c5ab..f31c48d0cd68 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
static inline bool system_uses_ttbr0_pan(void)
{
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
- !cpus_have_cap(ARM64_HAS_PAN);
+ !cpus_have_const_cap(ARM64_HAS_PAN);
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f21fd3894370..e7705e7bb07b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,8 +30,7 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 512
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 69b2fd41503c..345a072b5856 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgprot(x) ((pgprot_t) { (x) } )
#if CONFIG_PGTABLE_LEVELS == 2
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
+#elif CONFIG_PGTABLE_LEVELS == 4
+#include <asm-generic/5level-fixup.h>
#endif
#endif /* __ASM_PGTABLE_TYPES_H */
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 75a0f8acef66..fd691087dc9a 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
}
/**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
* @arg: argument to pass to CPU suspend operations
*
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae5b8a2..c5c45942fb6e 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
return 0;
}
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return NOTIFY_DONE;
-}
-
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1bfe30dfbfe7..fa1b18e364fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
return ret;
}
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+ kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+ hsr, esr_get_class_string(hsr));
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
u32 hsr = kvm_vcpu_get_hsr(vcpu);
u8 hsr_ec = ESR_ELx_EC(hsr);
- if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
- !arm_exit_handlers[hsr_ec]) {
- kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
- hsr, esr_get_class_string(hsr));
- BUG();
- }
-
return arm_exit_handlers[hsr_ec];
}
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index e8e7ba2bc11f..9e1d2b75eecd 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -18,14 +18,62 @@
#include <asm/kvm_hyp.h>
#include <asm/tlbflush.h>
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+ u64 val;
+
+ /*
+ * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+ * most TLB operations target EL2/EL0. In order to affect the
+ * guest TLBs (EL1/EL0), we need to change one of these two
+ * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+ * let's flip TGE before executing the TLB operation.
+ */
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ val = read_sysreg(hcr_el2);
+ val &= ~HCR_TGE;
+ write_sysreg(val, hcr_el2);
+ isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+ __tlb_switch_to_guest_nvhe,
+ __tlb_switch_to_guest_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+ /*
+ * We're done with the TLB operation, let's restore the host's
+ * view of HCR_EL2.
+ */
+ write_sysreg(0, vttbr_el2);
+ write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+ write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+ __tlb_switch_to_host_nvhe,
+ __tlb_switch_to_host_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
- isb();
+ __tlb_switch_to_guest()(kvm);
/*
* We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
dsb(ish);
isb();
- write_sysreg(0, vttbr_el2);
+ __tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
- isb();
+ __tlb_switch_to_guest()(kvm);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- write_sysreg(0, vttbr_el2);
+ __tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
/* Switch to requested VMID */
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
- isb();
+ __tlb_switch_to_guest()(kvm);
__tlbi(vmalle1);
dsb(nsh);
isb();
- write_sysreg(0, vttbr_el2);
+ __tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_flush_vm_context(void)
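
The pair of hyp_alternate_select() statements above resolves the nVHE or VHE helper once, keyed on ARM64_HAS_VIRT_HOST_EXTN, so each TLB flush makes a single pre-selected call instead of re-testing the feature. A rough userspace model of that select-once dispatch (the feature probe and all names below are made up for illustration; the real mechanism patches the call target at boot):

#include <stdbool.h>
#include <stdio.h>

static void switch_to_guest_nvhe(void)
{
	puts("program VTTBR_EL2, isb");
}

static void switch_to_guest_vhe(void)
{
	puts("program VTTBR_EL2, clear HCR_EL2.TGE, isb");
}

static void (*switch_to_guest)(void);

static bool has_vhe(void)	/* stand-in for the boot-time capability check */
{
	return false;
}

int main(void)
{
	/* Resolved once, like the boot-time alternatives patching... */
	switch_to_guest = has_vhe() ? switch_to_guest_vhe : switch_to_guest_nvhe;

	/* ...so the hot path is just one indirect call. */
	switch_to_guest();
	return 0;
}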
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 55d1e9205543..687a358a3733 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -162,7 +162,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(_text)));
+ pfn_to_nid(virt_to_pfn(lm_alias(_text))));
/*
* vmemmap_populate() has populated the shadow region that covers the
diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h
index 425dd567b5b9..d5b1c63993ec 100644
--- a/arch/avr32/include/asm/pgtable-2level.h
+++ b/arch/avr32/include/asm/pgtable-2level.h
@@ -8,6 +8,7 @@
#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
#define __ASM_AVR32_PGTABLE_2LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/*
diff --git a/arch/avr32/oprofile/backtrace.c b/arch/avr32/oprofile/backtrace.c
index 75d9ad6f99cf..29cf2f191bfd 100644
--- a/arch/avr32/oprofile/backtrace.c
+++ b/arch/avr32/oprofile/backtrace.c
@@ -14,7 +14,7 @@
*/
#include <linux/oprofile.h>
-#include <linux/sched.h>
+#include <linux/ptrace.h>
#include <linux/uaccess.h>
/* The first two words of each frame on the stack look like this if we have
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index ae6903d7fdbe..14970f11bbf2 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
dma_in_cfg.en = regk_dma_no;
REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
- /* Disble the cryptocop. */
+ /* Disable the cryptocop. */
rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
rw_cfg.en = 0;
REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 2a3210ba4c72..fa3a73004cc5 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -6,6 +6,7 @@
#define _CRIS_PGTABLE_H
#include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index a0513d463a1f..ab6e7e961b54 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -16,6 +16,7 @@
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
+#include <asm-generic/5level-fixup.h>
#include <asm/mem-layout.h>
#include <asm/setup.h>
#include <asm/processor.h>
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index 8341db67821d..7d265d28ba5e 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -1,5 +1,6 @@
#ifndef _H8300_PGTABLE_H
#define _H8300_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm-generic/pgtable.h>
#define pgtable_cache_init() do { } while (0)
diff --git a/arch/h8300/kernel/ptrace_h.c b/arch/h8300/kernel/ptrace_h.c
index fe3b5673baba..f5ff3b794c85 100644
--- a/arch/h8300/kernel/ptrace_h.c
+++ b/arch/h8300/kernel/ptrace_h.c
@@ -9,7 +9,7 @@
*/
#include <linux/linkage.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <asm/ptrace.h>
#define BREAKINST 0x5730 /* trapa #3 */
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 49eab8136ec3..24a9177fb897 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -26,6 +26,7 @@
*/
#include <linux/swap.h>
#include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* A handy thing to have if one has the RAM. Declared in head.S */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 384794e665fc..6cc22c8d8923 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr;
#if CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#endif
+#include <asm-generic/5level-fixup.h>
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index ffa3a3a2ecad..0c151e5af079 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -6,6 +6,7 @@
#define _METAG_PGTABLE_H
#include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index fd850879854d..d506bb0893f9 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t;
# else /* CONFIG_MMU */
typedef struct { unsigned long ste[64]; } pmd_t;
typedef struct { pmd_t pue[1]; } pud_t;
-typedef struct { pud_t pge[1]; } pgd_t;
+typedef struct { pud_t p4e[1]; } p4d_t;
+typedef struct { p4d_t pge[1]; } pgd_t;
# endif /* CONFIG_MMU */
# define pte_val(x) ((x).pte)
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
index a5b427909b5c..036d56cc4591 100644
--- a/arch/mips/cavium-octeon/cpu.c
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -10,7 +10,9 @@
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/prefetch.h>
+#include <linux/ptrace.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <asm/cop2.h>
#include <asm/current.h>
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
index 4d22365844af..cfb4a146cf17 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -9,6 +9,7 @@
#include <asm/cop2.h>
#include <linux/export.h>
#include <linux/interrupt.h>
+#include <linux/sched/task_stack.h>
#include "octeon-crypto.h"
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 4b94b7fbafa3..3de786545ded 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -12,6 +12,7 @@
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 321752bcbab6..f94455f964ec 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index d21f3da7bdb6..6f94bed571c4 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -16,6 +16,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
extern int temp_tlb_entry;
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 514cbc0a6a67..130a2a6c1531 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,6 +17,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
+#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 3daa2cae50b0..1b070a76fcdd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e077ea3e11fb..e398cbc3d776 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/atomic.h>
diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c
index ea13764d0a03..621d6af5f6eb 100644
--- a/arch/mips/loongson64/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson64/loongson-3/cop2-ex.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/notifier.h>
+#include <linux/ptrace.h>
#include <asm/fpu.h>
#include <asm/cop2.h>
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 10d86d54880a..bddf1ef553a4 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/irq.h>
diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c
index 52bc5de42005..21e439b3db70 100644
--- a/arch/mips/netlogic/xlp/cop2-ex.c
+++ b/arch/mips/netlogic/xlp/cop2-ex.c
@@ -9,11 +9,14 @@
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/prefetch.h>
+#include <linux/ptrace.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <asm/cop2.h>
#include <asm/current.h>
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 1f2a5bc4779e..75460e1e106b 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index d12879eb2b1f..83efe03d5c60 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -12,7 +12,9 @@
#include <linux/signal.h> /* for SIGBUS */
#include <linux/sched.h> /* schow_regs(), force_sig() */
#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <asm/ptrace.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn0/hub.h>
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f5ed45e8f442..4cd47d23d81a 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -8,10 +8,13 @@
*/
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
+
#include <asm/page.h>
#include <asm/processor.h>
+#include <asm/ptrace.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>
diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c
index 57d8c7486fe6..c1f12a9cf305 100644
--- a/arch/mips/sgi-ip32/ip32-berr.c
+++ b/arch/mips/sgi-ip32/ip32-berr.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 8bd415c8729f..b3b442def423 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/rtc/ds1685.h>
diff --git a/arch/mn10300/include/asm/page.h b/arch/mn10300/include/asm/page.h
index 3810a6f740fd..dfe730a5ede0 100644
--- a/arch/mn10300/include/asm/page.h
+++ b/arch/mn10300/include/asm/page.h
@@ -57,6 +57,7 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 298393c3cb42..db4f7d179220 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -22,6 +22,7 @@
#include <asm/tlbflush.h>
#include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#define FIRST_USER_ADDRESS 0UL
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 5fcb9ac72693..f0a5d8b844d6 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
return val;
}
-#define xchg(ptr, with) \
- ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with) \
+ ({ \
+ (__typeof__(*(ptr))) __xchg((unsigned long)(with), \
+ (ptr), \
+ sizeof(*(ptr))); \
+ })
#endif /* __ASM_OPENRISC_CMPXCHG_H */
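
The rewritten xchg() above wraps the cast in a GNU statement expression, ({ ... }), which evaluates to its last expression; unlike the bare cast form, a call whose result is deliberately ignored then no longer provokes "value computed is not used" warnings on gcc. A small sketch of the difference (__swap() is a non-atomic stand-in, not the OpenRISC primitive):

#include <stdio.h>

static unsigned long __swap(unsigned long *ptr, unsigned long with)
{
	unsigned long old = *ptr;	/* non-atomic stand-in */
	*ptr = with;
	return old;
}

/* Statement-expression form, mirroring the new macro shape. */
#define swap_ret(ptr, with)					\
	({							\
		(__typeof__(*(ptr)))__swap((ptr), (with));	\
	})

int main(void)
{
	unsigned long x = 1;

	swap_ret(&x, 2);				/* result ignored: no warning */
	printf("%lu\n", (unsigned long)swap_ret(&x, 3));	/* prints 2 */
	return 0;
}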
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 3567aa7be555..ff97374ca069 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -25,6 +25,7 @@
#ifndef __ASM_OPENRISC_PGTABLE_H
#define __ASM_OPENRISC_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 140faa16685a..1311e6b13991 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -211,7 +211,7 @@ do { \
case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
- case 8: __get_user_asm2(x, ptr, retval); \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
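
The one-line uaccess fix adds the break that case 8 lacked; without it, control fell through into the default arm and __get_user_bad() clobbered a value that had just been fetched successfully. The failure mode in miniature (names hypothetical):

#include <stdio.h>

static int fetch(int size)
{
	int val = -1;

	switch (size) {
	case 4:
		val = 4;
		break;
	case 8:
		val = 8;
		/* missing break: execution falls through... */
	default:
		val = 0;	/* ...and the default arm overwrites val */
	}
	return val;
}

int main(void)
{
	printf("size 4 -> %d\n", fetch(4));	/* 4, as expected */
	printf("size 8 -> %d\n", fetch(8));	/* 0, not 8: the fallthrough bug */
	return 0;
}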
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 5c4695d13542..ee3e604959e1 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -30,6 +30,7 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);
+EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(memset);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 828a29110459..f8da545854f9 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
}
void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
/*
* When a process does an "exec", machine state like FPU and debug
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c5f267..c7e15cc5c668 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
-
- flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
- void *cursor = vaddr;
- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(cursor);
-
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- flush_kernel_dcache_page(page);
- }
- flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index fb4382c28259..edfbf9d6a6dd 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -32,7 +32,8 @@
* that put_user is the same as __put_user, etc.
*/
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size) \
+ ( (uaddr) == (uaddr) )
#define put_user __put_user
#define get_user __get_user
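
access_ok() on parisc still unconditionally succeeds, but spelling the constant as (uaddr) == (uaddr) means the address argument is actually evaluated, so a caller whose only use of a variable is this check no longer trips unused-variable warnings. The two forms side by side (illustrative macros, not the kernel headers):

#include <stdio.h>

/* Old form: always true, uaddr never evaluated. */
#define ACCESS_OK_OLD(uaddr, size)	(1)

/* New form: still always true, but (uaddr) is referenced. */
#define ACCESS_OK_NEW(uaddr, size)	((uaddr) == (uaddr))

int main(void)
{
	char buf[8];
	char *p = buf;

	if (ACCESS_OK_NEW(p, sizeof(buf)))	/* p counts as used here */
		puts("range accepted");
	return 0;
}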
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 6b0741e7a7ed..667c99421003 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -362,8 +362,9 @@
#define __NR_copy_file_range (__NR_Linux + 346)
#define __NR_preadv2 (__NR_Linux + 347)
#define __NR_pwritev2 (__NR_Linux + 348)
+#define __NR_statx (__NR_Linux + 349)
-#define __NR_Linux_syscalls (__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls (__NR_statx + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0dc72d5de861..c32a09095216 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
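
The new out-of-line flush helpers choose between a ranged flush and flushing the whole data cache once the range passes parisc_cache_flush_threshold, on the reasoning that beyond some size one full flush is cheaper than walking every line. The shape of that decision as a standalone sketch (threshold value and helper names invented):

#include <stdio.h>

#define CACHE_FLUSH_THRESHOLD	(64 * 1024)	/* hypothetical break-even size */

static void flush_whole_dcache(void)
{
	puts("flush entire data cache");
}

static void flush_dcache_range(unsigned long start, unsigned long end)
{
	printf("flush lines %#lx..%#lx\n", start, end);
}

static void flush_vmap_range(void *vaddr, unsigned long size)
{
	unsigned long start = (unsigned long)vaddr;

	if (size > CACHE_FLUSH_THRESHOLD)	/* big range: full flush wins */
		flush_whole_dcache();
	else
		flush_dcache_range(start, start + size);
}

int main(void)
{
	char buf[32];

	flush_vmap_range(buf, sizeof(buf));	/* small: ranged flush */
	flush_vmap_range(buf, 1UL << 20);	/* large: whole-cache flush */
	return 0;
}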
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index a0ecdb4abcc8..c66c943d9322 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
+ case R_PARISC_SECREL32:
+ /* 32-bit section relative address. */
+ *loc = fsel(val, addend);
+ break;
case R_PARISC_DPREL21L:
/* left 21 bit of relative address */
val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
+ case R_PARISC_SECREL32:
+ /* 32-bit section relative address. */
+ *loc = fsel(val, addend);
+ break;
case R_PARISC_FPTR64:
/* 64-bit function address */
if(in_local(me, (void *)(val + addend))) {
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index e282a5131d77..6017a5af2e6e 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -39,7 +39,7 @@
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
- * on every box.
+ * on every box.
*/
#include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
/*
* configure:
*
- * Configure the cpu with a given data image. First turn off the counters,
+ * Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
}
/*
- * Open the device and initialize all of its memory. The device is only
+ * Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
* called on the processor that the download should happen
* on.
*/
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
size_t image_size;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
- if (perf_processor_interface == ONYX_INTF)
+ if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
- else if (perf_processor_interface == CUDA_INTF)
+ else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
- else
+ else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
/* First check the machine type is correct for
the requested image */
- if (((perf_processor_interface == CUDA_INTF) &&
- (interface_type != CUDA_INTF)) ||
- ((perf_processor_interface == ONYX_INTF) &&
- (interface_type != ONYX_INTF)))
+ if (((perf_processor_interface == CUDA_INTF) &&
+ (interface_type != CUDA_INTF)) ||
+ ((perf_processor_interface == ONYX_INTF) &&
+ (interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
- if (((interface_type == CUDA_INTF) &&
+ if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
- ((interface_type == ONYX_INTF) &&
- (test >= MAX_ONYX_IMAGES)))
+ ((interface_type == ONYX_INTF) &&
+ (test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
- if (interface_type == CUDA_INTF)
+ if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
-/*
+/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
- /*
+ /*
* We can only use the lower 32-bits, the upper 32-bits should be 0
- * anyway given this is in the kernel
+ * anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
- onyx_images[TLBMISS][15] &= 0xffffff00;
+ onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[BIG_CPI][15] &= 0xffffff00;
+ onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
- cuda_images[TLBMISS][16] =
+ cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBMISS][17] =
+ cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[TLBHANDMISS][16] =
+ cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBHANDMISS][17] =
+ cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[BIG_CPI][16] =
+ cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[BIG_CPI][17] =
+ cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)
/*
* ioctl routine
- * All routines effect the processor that they are executed on. Thus you
+ * All routines affect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
/* copy out the Counters */
- if (copy_to_user((void __user *)arg, raddr,
+ if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
.open = perf_open,
.release = perf_release
};
-
+
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
-
+
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
userbuf[22] = 0;
userbuf[23] = 0;
- /*
+ /*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
} else {
/*
- * Read RDR-15 which contains the counters and sticky bits
+ * Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
- /*
+ /*
* Clear out the counters
*/
perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
-
+
return 0;
}
@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
- }
+ }
/* Check for bits an even number of 64 */
if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+ if (!runway) {
+ pr_err("perf_write_image: ioremap failed!\n");
+ return -ENOMEM;
+ }
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
- __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+ __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
-
+
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
- return 0;
+ return 0;
}
/*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
- }
+ }
}
printk("perf_rdr_write done\n");
}
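
Buried in the perf.c whitespace cleanup is one substantive change: perf_write_image() now checks the ioremap_nocache() return value, since MMIO mappings can fail and the old code would have written through a NULL pointer. The pattern in isolation (map_device() is a userspace stand-in for ioremap):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for ioremap_nocache(): mapping a device region can fail. */
static void *map_device(unsigned long phys, size_t len)
{
	(void)phys;
	return malloc(len);
}

static int write_image(unsigned long hpa)
{
	unsigned char *regs = map_device(hpa, 4096);

	if (!regs) {
		fprintf(stderr, "write_image: mapping failed!\n");
		return -ENOMEM;		/* bail out before touching the mapping */
	}

	regs[0] = 0x13;			/* safe: mapping known to exist */
	free(regs);
	return 0;
}

int main(void)
{
	return write_image(0xf0000000UL) ? 1 : 0;
}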
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 06f7ca7fe70b..b76f503eee4a 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -142,6 +142,8 @@ void machine_power_off(void)
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
+
+ for (;;);
}
void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1de8061..44aeaa9c039f 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -444,6 +444,7 @@
ENTRY_SAME(copy_file_range)
ENTRY_COMP(preadv2)
ENTRY_COMP(pwritev2)
+ ENTRY_SAME(statx)
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 494091762bd7..97a8bc8a095c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
config PPC
bool
default y
- select BUILDTIME_EXTABLE_SORT
+ #
+ # Please keep this list sorted alphabetically.
+ #
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_SET_COHERENT_MASK
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
+ select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+ select ARCH_WANT_IPC_PARSE_VERSION
select BINFMT_ELF
- select ARCH_HAS_ELF_RANDOMIZE
- select OF
- select OF_EARLY_FLATTREE
- select OF_RESERVED_MEM
- select HAVE_FTRACE_MCOUNT_RECORD
+ select BUILDTIME_EXTABLE_SORT
+ select CLONE_BACKWARDS
+ select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+ select EDAC_ATOMIC_SCRUB
+ select EDAC_SUPPORT
+ select GENERIC_ATOMIC64 if PPC32
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select GENERIC_TIME_VSYSCALL_OLD
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_CBPF_JIT if !PPC64
+ select HAVE_CONTEXT_TRACKING if PPC64
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_DMA_API_DEBUG
select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
- select HAVE_FUNCTION_TRACER
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
+ select HAVE_EBPF_JIT if PPC64
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+ select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
- select SYSCTL_EXCEPTION_TRACE
- select VIRT_TO_BUS if !PPC64
+ select HAVE_GENERIC_RCU_GUP
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
- select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_KERNEL_GZIP
select HAVE_KPROBES
- select HAVE_OPTPROBES if PPC64
- select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
- select HAVE_ARCH_TRACEHOOK
+ select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select HAVE_DMA_API_DEBUG
+ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI if PERF_EVENTS
select HAVE_OPROFILE
- select HAVE_DEBUG_KMEMLEAK
- select ARCH_HAS_SG_CHAIN
- select GENERIC_ATOMIC64 if PPC32
+ select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
+ select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE if SMP
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
- select ARCH_WANT_IPC_PARSE_VERSION
- select SPARSE_IRQ
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_VIRT_CPU_ACCOUNTING
select IRQ_DOMAIN
- select GENERIC_IRQ_SHOW
- select GENERIC_IRQ_SHOW_LEVEL
select IRQ_FORCED_THREADING
- select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_CBPF_JIT if !PPC64
- select HAVE_EBPF_JIT if PPC64
- select HAVE_ARCH_JUMP_LABEL
- select ARCH_HAVE_NMI_SAFE_CMPXCHG
- select ARCH_HAS_GCOV_PROFILE_ALL
- select GENERIC_SMP_IDLE_THREAD
- select GENERIC_CMOS_UPDATE
- select GENERIC_TIME_VSYSCALL_OLD
- select GENERIC_CLOCKEVENTS
- select GENERIC_CLOCKEVENTS_BROADCAST if SMP
- select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
- select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
- select ARCH_USE_BUILTIN_BSWAP
- select OLD_SIGSUSPEND
- select OLD_SIGACTION if PPC32
- select HAVE_DEBUG_STACKOVERFLOW
- select HAVE_IRQ_EXIT_ON_IRQ_STACK
- select ARCH_USE_CMPXCHG_LOCKREF if PPC64
- select HAVE_ARCH_AUDITSYSCALL
- select ARCH_SUPPORTS_ATOMIC_RMW
- select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select NO_BOOTMEM
- select HAVE_GENERIC_RCU_GUP
- select HAVE_PERF_EVENTS_NMI if PPC64
- select HAVE_NMI if PERF_EVENTS
- select EDAC_SUPPORT
- select EDAC_ATOMIC_SCRUB
- select ARCH_HAS_DMA_SET_COHERENT_MASK
- select ARCH_HAS_DEVMEM_IS_ALLOWED
- select HAVE_ARCH_SECCOMP_FILTER
- select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
- select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
- select GENERIC_CPU_AUTOPROBE
- select HAVE_VIRT_CPU_ACCOUNTING
- select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
- select HAVE_ARCH_HARDENED_USERCOPY
- select HAVE_KERNEL_GZIP
- select HAVE_CONTEXT_TRACKING if PPC64
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
+ select OLD_SIGACTION if PPC32
+ select OLD_SIGSUSPEND
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select VIRT_TO_BUS if !PPC64
+ #
+ # Please keep this list sorted alphabetically.
+ #
config GENERIC_CSUM
def_bool n
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 31286fa7873c..19b0d1a81959 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -72,8 +72,15 @@ GNUTARGET := powerpc
MULTIPLEWORD := -mmultiple
endif
-cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
+endif
+
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
ifneq ($(cc-name),clang)
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
endif
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
+CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
endif
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
+ . = ALIGN(256);
.got :
{
__toc_start = .;
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d56eba..411994551afc 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
- *key = 0;
+ *key = ~0;
return 0;
}
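
The crc32c-vpmsum fix seeds the context with ~0 instead of 0: CRC32C is specified with an all-ones initial remainder plus a final inversion, so a zero seed yields wrong digests for every input. A self-contained bitwise CRC32C showing where the ~0 seed and the final XOR enter (reference-style code, unrelated to the vpmsum implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC32C_POLY_REFLECTED	0x82F63B78u

static uint32_t crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = ~0u;		/* the seed the fix restores */

	while (len--) {
		crc ^= *p++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (CRC32C_POLY_REFLECTED & -(crc & 1));
	}
	return ~crc;			/* matching final inversion */
}

int main(void)
{
	const char *s = "123456789";

	/* The standard CRC32C check value for "123456789" is 0xE3069283. */
	printf("%08X\n", crc32c(s, strlen(s)));
	return 0;
}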
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 73eb794d6163..bc5fdfd22788 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,6 +51,10 @@
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
+ ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
#include <asm/barrier.h>
/* Macro for generating the ***_bits() functions */
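
PPC_BITEXTRACT bridges IBM bit numbering, where bit 0 is the most-significant bit of the 64-bit word, and conventional LSB-0 positions: PPC_BITLSHIFT(be) is 63 - be on 64-bit, so the macro isolates the named IBM bit and replants it at dst_bit. A quick userspace check of the arithmetic (macros adapted from the hunk, with 1ULL for portability):

#include <stdint.h>
#include <stdio.h>

#define PPC_BITLSHIFT(be)	(63 - (be))	/* IBM bit -> LSB-0 shift */
#define PPC_BIT(bit)		(1ULL << PPC_BITLSHIFT(bit))
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

int main(void)
{
	uint64_t srr1 = PPC_BIT(45);	/* IBM bit 45 == conventional bit 18 */

	printf("%llu\n", (unsigned long long)PPC_BITEXTRACT(srr1, 45, 0)); /* 1 */
	printf("%llu\n", (unsigned long long)PPC_BITEXTRACT(srr1, 44, 1)); /* 0 */
	return 0;
}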
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 012223638815..26ed228d4dc6 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/book3s/32/hash.h>
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 1eeeb72c7015..8f4d41936e5a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1,9 +1,12 @@
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#include <asm-generic/5level-fixup.h>
+
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#endif
+
/*
* Common bits between hash and Radix page table
*/
@@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
__r; \
})
+static inline int __pte_write(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ /*
+ * Saved write ptes are prot none ptes that don't have the
+ * privileged bit set. We mark prot none as one which has the
+ * present and privileged bits set and RWX cleared. To mark
+ * protnone which used to have _PAGE_WRITE set, we clear
+ * the privileged bit.
+ */
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+ return __pte_write(pte) || pte_savedwrite(pte);
+}
+
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+ /*
+ * We should not find protnone for hugetlb, but this completes the
+ * interface.
+ */
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
-static inline int pte_write(pte_t pte)
-{
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
-}
-
static inline int pte_dirty(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
VM_BUG_ON(!pte_protnone(pte));
return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
}
-
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
{
- /*
- * Saved write ptes are prot none ptes that doesn't have
- * privileged bit sit. We mark prot none as one which has
- * present and pviliged bit set and RWX cleared. To mark
- * protnone which used to have _PAGE_WRITE set we clear
- * the privileged bit.
- */
- VM_BUG_ON(!pte_protnone(pte));
- return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+ VM_WARN_ON(1);
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
+ if (unlikely(pte_savedwrite(pte)))
+ return pte_clear_savedwrite(pte);
return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
@@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
-
- if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+ if (__pmd_write((*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+ else if (unlikely(pmd_savedwrite(*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}
static inline int pmd_trans_huge(pmd_t pmd)
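
The reshuffle above leaves two distinct writability tests: __pte_write() checks the architectural _PAGE_WRITE bit, while pte_savedwrite() recognises a NUMA-balancing prot-none PTE that used to be writable, encoded by clearing _PAGE_PRIVILEGED while RWX stay clear; pte_write() is the OR of the two. A toy model of those predicates (bit values invented, not the real book3s/64 layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit assignments only. */
#define PAGE_READ	0x1ULL
#define PAGE_WRITE	0x2ULL
#define PAGE_EXEC	0x4ULL
#define PAGE_PRIVILEGED	0x8ULL
#define PAGE_RWX	(PAGE_READ | PAGE_WRITE | PAGE_EXEC)

static bool pte_write_hw(uint64_t pte)
{
	return pte & PAGE_WRITE;
}

/* prot-none = RWX clear + privileged set; savedwrite = prot-none
 * with the privileged bit additionally cleared. */
static bool pte_savedwrite(uint64_t pte)
{
	return !(pte & (PAGE_RWX | PAGE_PRIVILEGED));
}

static bool pte_write(uint64_t pte)
{
	return pte_write_hw(pte) || pte_savedwrite(pte);
}

int main(void)
{
	uint64_t writable = PAGE_READ | PAGE_WRITE;
	uint64_t protnone = PAGE_PRIVILEGED;	/* was read-only */
	uint64_t savedwr  = 0;			/* prot-none, was writable */

	printf("writable=%d protnone=%d savedwrite=%d\n",
	       pte_write(writable), pte_write(protnone), pte_write(savedwr));
	return 0;
}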
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 4e63787dc3be..842124b199b5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
#ifdef __powerpc64__
res += (__force u64)addend;
- return (__force __wsum)((u32)res + (res >> 32));
+ return (__force __wsum) from64to32(res);
#else
asm("addc %0,%0,%1;"
"addze %0,%0;"
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index fd321eb423cb..155731557c9b 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
-1: cmpd cr0,r0,r0; \
- bne 1b; \
+236: cmpd cr0,r0,r0; \
+ bne 236b; \
IDLE_INST; \
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 93b9b84568e8..09bde6e34f5d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define ARCH_DLINFO_CACHE_GEOMETRY \
NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \
NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \
- NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \
- NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \
+ NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \
+ NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \
NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \
NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \
NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index f97d8cb6bdf6..ed62efe01e49 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -66,6 +66,55 @@
#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1) ( \
+ PPC_BITEXTRACT(srr1, 45, 0) | \
+ PPC_BITEXTRACT(srr1, 44, 1) | \
+ PPC_BITEXTRACT(srr1, 43, 2) | \
+ PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE 1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA 11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE (PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
+ P9_DSISR_MC_SLB_PARITY_MFSLB | \
+ P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
+
enum MCE_Version {
MCE_V1 = 1,
};
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
MCE_ERROR_TYPE_SLB = 2,
MCE_ERROR_TYPE_ERAT = 3,
MCE_ERROR_TYPE_TLB = 4,
+ MCE_ERROR_TYPE_USER = 5,
+ MCE_ERROR_TYPE_RA = 6,
+ MCE_ERROR_TYPE_LINK = 7,
};
enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
MCE_TLB_ERROR_MULTIHIT = 2,
};
+enum MCE_UserErrorType {
+ MCE_USER_ERROR_INDETERMINATE = 0,
+ MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+ MCE_RA_ERROR_INDETERMINATE = 0,
+ MCE_RA_ERROR_IFETCH = 1,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
+ MCE_RA_ERROR_LOAD = 4,
+ MCE_RA_ERROR_STORE = 5,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
+ MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+};
+
+enum MCE_LinkErrorType {
+ MCE_LINK_ERROR_INDETERMINATE = 0,
+ MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+ MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+ MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
struct machine_check_event {
enum MCE_Version version:8; /* 0x00 */
uint8_t in_use; /* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
uint64_t effective_address;
uint8_t reserved_2[16];
} tlb_error;
+
+ struct {
+ enum MCE_UserErrorType user_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } user_error;
+
+ struct {
+ enum MCE_RaErrorType ra_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } ra_error;
+
+ struct {
+ enum MCE_LinkErrorType link_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } link_error;
} u;
};
@@ -176,8 +278,12 @@ struct mce_error_info {
enum MCE_SlbErrorType slb_error_type:8;
enum MCE_EratErrorType erat_error_type:8;
enum MCE_TlbErrorType tlb_error_type:8;
+ enum MCE_UserErrorType user_error_type:8;
+ enum MCE_RaErrorType ra_error_type:8;
+ enum MCE_LinkErrorType link_error_type:8;
} u;
- uint8_t reserved[2];
+ enum MCE_Severity severity:8;
+ enum MCE_Initiator initiator:8;
};
#define MAX_MC_EVT 100
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index ba9921bf202e..5134ade2e850 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index d0db98793dd8..9f4de0a1035e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -1,5 +1,8 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+
+#include <asm-generic/5level-fixup.h>
+
/*
* Entries per page directory level. The PTE level must use a 64b record
* for each page table entry. The PMD and PGD level use a 32b record for
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 55b28ef3409a..1facb584dd29 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 0cd8a3852763..e5805ad78e12 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
return ((hpd_val(hpd) & 0x4) != 0);
#else
/* We clear the top bit to indicate hugepd */
- return ((hpd_val(hpd) & PD_HUGE) == 0);
+ return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}
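
The hugepd_ok() change matters because an empty (all-zero) directory entry also has PD_HUGE clear, so the top-bit-only test misread cleared entries as huge-page directories; insisting on a non-zero value first rules them out. In miniature (PD_HUGE per the kernel's top-bit convention):

#include <stdbool.h>
#include <stdio.h>

#define PD_HUGE	0x8000000000000000ULL	/* top bit clear => hugepd */

static bool hugepd_ok_old(unsigned long long hpd)
{
	return (hpd & PD_HUGE) == 0;		/* a zero entry wrongly passes */
}

static bool hugepd_ok_new(unsigned long long hpd)
{
	return hpd && (hpd & PD_HUGE) == 0;	/* empty entries ruled out */
}

int main(void)
{
	printf("empty entry: old=%d new=%d\n",
	       hugepd_ok_old(0), hugepd_ok_new(0));		/* old=1 new=0 */
	printf("hugepd entry: old=%d new=%d\n",
	       hugepd_ok_old(0x1000), hugepd_ok_new(0x1000));	/* both 1 */
	return 0;
}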
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d99bd442aacb..e7d6d86563ee 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -284,6 +284,13 @@
#define PPC_INST_BRANCH_COND 0x40800000
#define PPC_INST_LBZCIX 0x7c0006aa
#define PPC_INST_STBCIX 0x7c0007aa
+#define PPC_INST_LWZX 0x7c00002e
+#define PPC_INST_LFSX 0x7c00042e
+#define PPC_INST_STFSX 0x7c00052e
+#define PPC_INST_LFDX 0x7c0004ae
+#define PPC_INST_STFDX 0x7c0005ae
+#define PPC_INST_LVX 0x7c0000ce
+#define PPC_INST_STVX 0x7c0001ce
/* macros to insert fields into opcodes */
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 4a90634e8322..35c00d7a0cf8 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -160,12 +160,18 @@ struct of_drconf_cell {
#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */
-#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */
-#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */
-#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */
-#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */
-#define OV5_MMU_SLB 0x1800 /* always use SLB */
-#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */
+/* MMU Base Architecture */
+#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
+#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
+#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */
+#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */
+#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */
+#define OV5_NMMU 0x1820 /* Nest MMU Available */
+/* Hash Table Extensions */
+#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */
+#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */
+/* Radix Table Extensions */
+#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 4b369d83fe9c..1c9470881c4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
COMPAT_SYS_SPU(preadv2)
COMPAT_SYS_SPU(pwritev2)
SYSCALL(kexec_file_load)
+SYSCALL(statx)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index eb1acee91a20..9ba11dbcaca9 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 383
+#define NR_syscalls 384
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 2f26335a3c42..b85f14228857 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -393,5 +393,6 @@
#define __NR_preadv2 380
#define __NR_pwritev2 381
#define __NR_kexec_file_load 382
+#define __NR_statx 383
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bb7a1890aeb7..e79b9daa873c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Cell Broadband Engine */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5f61cc0349c0..995728736677 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -276,19 +276,21 @@ power_enter_stop:
*/
andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
- bne 1f
+ bne .Lhandle_esl_ec_set
IDLE_STATE_ENTER_SEQ(PPC_STOP)
li r3,0 /* Since we didn't lose state, return 0 */
b pnv_wakeup_noloss
+
+.Lhandle_esl_ec_set:
/*
* Check if the requested state is a deep idle state.
*/
-1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+ LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
cmpd r3,r4
- bge 2f
+ bge .Lhandle_deep_stop
IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
-2:
+.Lhandle_deep_stop:
/*
* Entering deep idle state.
* Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index c6923ff45131..a1475e6aef3a 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
case MCE_ERROR_TYPE_TLB:
mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
break;
+ case MCE_ERROR_TYPE_USER:
+ mce->u.user_error.user_error_type = mce_err->u.user_error_type;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ mce->u.link_error.link_error_type = mce_err->u.link_error_type;
+ break;
case MCE_ERROR_TYPE_UNKNOWN:
default:
break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->gpr3 = regs->gpr[3];
mce->in_use = 1;
- mce->initiator = MCE_INITIATOR_CPU;
/* Mark it recovered if we have handled it and MSR(RI=1). */
if (handled && (regs->msr & MSR_RI))
mce->disposition = MCE_DISPOSITION_RECOVERED;
else
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
- mce->severity = MCE_SEV_ERROR_SYNC;
+
+ mce->initiator = mce_err->initiator;
+ mce->severity = mce_err->severity;
/*
* Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
mce->u.erat_error.effective_address_provided = true;
mce->u.erat_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
+ mce->u.user_error.effective_address_provided = true;
+ mce->u.user_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
+ mce->u.ra_error.effective_address_provided = true;
+ mce->u.ra_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
+ mce->u.link_error.effective_address_provided = true;
+ mce->u.link_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
mce->u.ue_error.effective_address_provided = true;
mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
"Parity",
"Multihit",
};
+ static const char *mc_user_types[] = {
+ "Indeterminate",
+ "tlbie(l) invalid",
+ };
+ static const char *mc_ra_types[] = {
+ "Indeterminate",
+ "Instruction fetch (bad)",
+ "Page table walk ifetch (bad)",
+ "Page table walk ifetch (foreign)",
+ "Load (bad)",
+ "Store (bad)",
+ "Page table walk Load/Store (bad)",
+ "Page table walk Load/Store (foreign)",
+ "Load/Store (foreign)",
+ };
+ static const char *mc_link_types[] = {
+ "Indeterminate",
+ "Instruction fetch (timeout)",
+ "Page table walk ifetch (timeout)",
+ "Load (timeout)",
+ "Store (timeout)",
+ "Page table walk Load/Store (timeout)",
+ };
/* Print things out */
if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
printk("%s Effective address: %016llx\n",
level, evt->u.tlb_error.effective_address);
break;
+ case MCE_ERROR_TYPE_USER:
+ subtype = evt->u.user_error.user_error_type <
+ ARRAY_SIZE(mc_user_types) ?
+ mc_user_types[evt->u.user_error.user_error_type]
+ : "Unknown";
+ printk("%s Error type: User [%s]\n", level, subtype);
+ if (evt->u.user_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.user_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_RA:
+ subtype = evt->u.ra_error.ra_error_type <
+ ARRAY_SIZE(mc_ra_types) ?
+ mc_ra_types[evt->u.ra_error.ra_error_type]
+ : "Unknown";
+ printk("%s Error type: Real address [%s]\n", level, subtype);
+ if (evt->u.ra_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.ra_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ subtype = evt->u.link_error.link_error_type <
+ ARRAY_SIZE(mc_link_types) ?
+ mc_link_types[evt->u.link_error.link_error_type]
+ : "Unknown";
+ printk("%s Error type: Link [%s]\n", level, subtype);
+ if (evt->u.link_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.link_error.effective_address);
+ break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
printk("%s Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
if (evt->u.tlb_error.effective_address_provided)
return evt->u.tlb_error.effective_address;
break;
+ case MCE_ERROR_TYPE_USER:
+ if (evt->u.user_error.effective_address_provided)
+ return evt->u.user_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ if (evt->u.ra_error.effective_address_provided)
+ return evt->u.ra_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ if (evt->u.link_error.effective_address_provided)
+ return evt->u.link_error.effective_address;
+ break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
break;
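
The subtype strings added above are selected with a bounds-checked array lookup, so an out-of-range error code degrades to "Unknown" instead of reading past the table. A standalone sketch of the same pattern (illustrative only, with the table truncated):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *mc_ra_types[] = {
	"Indeterminate",
	"Instruction fetch (bad)",
	/* ... remaining subtypes elided ... */
};

static const char *ra_subtype(unsigned int type)
{
	/* Same guard as machine_check_print_event_info() */
	return type < ARRAY_SIZE(mc_ra_types) ? mc_ra_types[type] : "Unknown";
}

int main(void)
{
	printf("%s\n", ra_subtype(1));   /* "Instruction fetch (bad)" */
	printf("%s\n", ra_subtype(99));  /* "Unknown" */
	return 0;
}
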
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 7353991c4ece..763d6f58caa8 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
}
#endif
+static void flush_erat(void)
+{
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+}
+
+#define MCE_FLUSH_SLB 1
+#define MCE_FLUSH_TLB 2
+#define MCE_FLUSH_ERAT 3
+
+static int mce_flush(int what)
+{
+#ifdef CONFIG_PPC_STD_MMU_64
+ if (what == MCE_FLUSH_SLB) {
+ flush_and_reload_slb();
+ return 1;
+ }
+#endif
+ if (what == MCE_FLUSH_ERAT) {
+ flush_erat();
+ return 1;
+ }
+ if (what == MCE_FLUSH_TLB) {
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
+{
+ if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
+ dsisr &= ~slb;
+ if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
+ dsisr &= ~erat;
+ if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
+ dsisr &= ~tlb;
+ /* Any other errors we don't understand? */
+ if (dsisr)
+ return 0;
+ return 1;
+}
+
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
{
long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
+ mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+ mce_error_info.initiator = MCE_INITIATOR_CPU;
+
srr1 = regs->msr;
nip = regs->nip;
@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
+ mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+ mce_error_info.initiator = MCE_INITIATOR_CPU;
+
srr1 = regs->msr;
nip = regs->nip;
@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
save_mce_event(regs, handled, &mce_error_info, nip, addr);
return handled;
}
+
+static int mce_handle_derror_p9(struct pt_regs *regs)
+{
+ uint64_t dsisr = regs->dsisr;
+
+ return mce_handle_flush_derrors(dsisr,
+ P9_DSISR_MC_SLB_PARITY_MFSLB |
+ P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
+
+ P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
+
+ P9_DSISR_MC_ERAT_MULTIHIT);
+}
+
+static int mce_handle_ierror_p9(struct pt_regs *regs)
+{
+ uint64_t srr1 = regs->msr;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_SLB_PARITY:
+ case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ return mce_flush(MCE_FLUSH_SLB);
+ case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ return mce_flush(MCE_FLUSH_TLB);
+ case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+ return mce_flush(MCE_FLUSH_ERAT);
+ default:
+ return 0;
+ }
+}
+
+static void mce_get_derror_p9(struct pt_regs *regs,
+ struct mce_error_info *mce_err, uint64_t *addr)
+{
+ uint64_t dsisr = regs->dsisr;
+
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ mce_err->initiator = MCE_INITIATOR_CPU;
+
+ if (dsisr & P9_DSISR_MC_USER_TLBIE)
+ *addr = regs->nip;
+ else
+ *addr = regs->dar;
+
+ if (dsisr & P9_DSISR_MC_UE) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
+ } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
+ } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
+ mce_err->error_type = MCE_ERROR_TYPE_USER;
+ mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
+ } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_RA_LOAD) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
+ } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+ } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+ }
+}
+
+static void mce_get_ierror_p9(struct pt_regs *regs,
+ struct mce_error_info *mce_err, uint64_t *addr)
+{
+ uint64_t srr1 = regs->msr;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+ case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+ mce_err->severity = MCE_SEV_FATAL;
+ break;
+ default:
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ break;
+ }
+
+ mce_err->initiator = MCE_INITIATOR_CPU;
+
+ *addr = regs->nip;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_UE:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_SLB_PARITY:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ break;
+ case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_RA:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
+ break;
+ default:
+ break;
+ }
+}
+
+long __machine_check_early_realmode_p9(struct pt_regs *regs)
+{
+ uint64_t nip, addr;
+ long handled;
+ struct mce_error_info mce_error_info = { 0 };
+
+ nip = regs->nip;
+
+ if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
+ handled = mce_handle_derror_p9(regs);
+ mce_get_derror_p9(regs, &mce_error_info, &addr);
+ } else {
+ handled = mce_handle_ierror_p9(regs);
+ mce_get_ierror_p9(regs, &mce_error_info, &addr);
+ }
+
+ /* Handle UE error. */
+ if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+ handled = mce_handle_ue_error(regs);
+
+ save_mce_event(regs, handled, &mce_error_info, nip, addr);
+ return handled;
+}
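
mce_handle_flush_derrors() above treats DSISR as a scoreboard: each recognized error bit is cleared only if the corresponding flush succeeds, and the handler reports recovery only when no unexplained bits remain. A self-contained sketch of that control flow — the flushes are stubbed and the bit values are illustrative, not the real P9 DSISR layout:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Illustrative bit positions, not the real P9 DSISR encoding */
#define DEMO_SLB_ERR  0x1
#define DEMO_TLB_ERR  0x2
#define DEMO_ERAT_ERR 0x4

static bool flush_slb(void)  { return true; }  /* pretend flush worked */
static bool flush_tlb(void)  { return true; }
static bool flush_erat(void) { return true; }

/* Mirrors the shape of mce_handle_flush_derrors(): clear a bit only
 * on a successful flush; any leftover bit means "not handled". */
static int handle_flush_derrors(uint64_t dsisr)
{
	if ((dsisr & DEMO_SLB_ERR) && flush_slb())
		dsisr &= ~(uint64_t)DEMO_SLB_ERR;
	if ((dsisr & DEMO_ERAT_ERR) && flush_erat())
		dsisr &= ~(uint64_t)DEMO_ERAT_ERR;
	if ((dsisr & DEMO_TLB_ERR) && flush_tlb())
		dsisr &= ~(uint64_t)DEMO_TLB_ERR;
	return dsisr ? 0 : 1;
}

int main(void)
{
	printf("handled=%d\n", handle_flush_derrors(DEMO_SLB_ERR | DEMO_TLB_ERR));
	printf("handled=%d\n", handle_flush_derrors(0x8000)); /* unknown bit */
	return 0;
}
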
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index a3944540fe0d..1c1b44ec7642 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
static unsigned long __initdata prom_tce_alloc_end;
#endif
+static bool __initdata prom_radix_disable;
+
+struct platform_support {
+ bool hash_mmu;
+ bool radix_mmu;
+ bool radix_gtse;
+};
+
/* Platforms codes are now obsolete in the kernel. Now only used within this
* file and ultimately gone too. Feel free to change them if you need, they
* are not shared with anything outside of this file anymore
@@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
#endif
}
+
+ opt = strstr(prom_cmd_line, "disable_radix");
+ if (opt) {
+ prom_debug("Radix disabled from cmdline\n");
+ prom_radix_disable = true;
+ }
}
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
@@ -695,6 +709,8 @@ struct option_vector5 {
u8 byte22;
u8 intarch;
u8 mmu;
+ u8 hash_ext;
+ u8 radix_ext;
} __packed;
struct option_vector6 {
@@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
.reserved3 = 0,
.subprocessors = 1,
.intarch = 0,
- .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) |
- OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE),
+ .mmu = 0,
+ .hash_ext = 0,
+ .radix_ext = 0,
},
/* option vector 6: IBM PAPR hints */
@@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
}
+static void __init prom_parse_mmu_model(u8 val,
+ struct platform_support *support)
+{
+ switch (val) {
+ case OV5_FEAT(OV5_MMU_DYNAMIC):
+ case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
+ prom_debug("MMU - either supported\n");
+ support->radix_mmu = !prom_radix_disable;
+ support->hash_mmu = true;
+ break;
+ case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
+ prom_debug("MMU - radix only\n");
+ if (prom_radix_disable) {
+ /*
+ * If we __have__ to do radix, we're better off ignoring
+ * the command line rather than not booting.
+ */
+ prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
+ }
+ support->radix_mmu = true;
+ break;
+ case OV5_FEAT(OV5_MMU_HASH):
+ prom_debug("MMU - hash only\n");
+ support->hash_mmu = true;
+ break;
+ default:
+ prom_debug("Unknown mmu support option: 0x%x\n", val);
+ break;
+ }
+}
+
+static void __init prom_parse_platform_support(u8 index, u8 val,
+ struct platform_support *support)
+{
+ switch (index) {
+ case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
+ prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
+ break;
+ case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
+ if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
+ prom_debug("Radix - GTSE supported\n");
+ support->radix_gtse = true;
+ }
+ break;
+ }
+}
+
+static void __init prom_check_platform_support(void)
+{
+ struct platform_support supported = {
+ .hash_mmu = false,
+ .radix_mmu = false,
+ .radix_gtse = false
+ };
+ int prop_len = prom_getproplen(prom.chosen,
+ "ibm,arch-vec-5-platform-support");
+ if (prop_len > 1) {
+ int i;
+ u8 vec[prop_len];
+ prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
+ prop_len);
+ prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+ &vec, sizeof(vec));
+ for (i = 0; i < prop_len; i += 2) {
+			prom_debug("%d: index = 0x%x val = 0x%x\n",
+				   i / 2, vec[i], vec[i + 1]);
+ prom_parse_platform_support(vec[i], vec[i + 1],
+ &supported);
+ }
+ }
+
+ if (supported.radix_mmu && supported.radix_gtse) {
+ /* Radix preferred - but we require GTSE for now */
+ prom_debug("Asking for radix with GTSE\n");
+ ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
+ ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
+ } else if (supported.hash_mmu) {
+ /* Default to hash mmu (if we can) */
+ prom_debug("Asking for hash\n");
+ ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
+ } else {
+ /* We're probably on a legacy hypervisor */
+ prom_debug("Assuming legacy hash support\n");
+ }
+}
static void __init prom_send_capabilities(void)
{
@@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
prom_arg_t ret;
u32 cores;
+ /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
+ prom_check_platform_support();
+
root = call_prom("open", 1, 1, ADDR("/"));
if (root != 0) {
/* We need to tell the FW about the number of cores we support.
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_check_initrd(r3, r4);
+ /*
+ * Do early parsing of command line
+ */
+ early_cmdline_parse();
+
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* On pSeries, inform the firmware about our capabilities
@@ -3009,11 +3120,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
copy_and_flush(0, kbase, 0x100, 0);
/*
- * Do early parsing of command line
- */
- early_cmdline_parse();
-
- /*
* Initialize memory management within prom_init
*/
prom_init_mem();
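
The new prom_check_platform_support() above walks "ibm,arch-vec-5-platform-support", which is a flat byte array of (option-vector-5 index, supported-features) pairs. A sketch of that pairwise walk over a fabricated property — the indices and feature bits below are placeholders standing in for the OV5_INDX()/OV5_FEAT() encoding, not the real values:

#include <stdio.h>
#include <stdint.h>

/* Placeholder indices/bits standing in for OV5_INDX()/OV5_FEAT() */
#define DEMO_IDX_MMU_SUPPORT 0x18
#define DEMO_IDX_RADIX_GTSE  0x1a
#define DEMO_FEAT_GTSE       0x40

int main(void)
{
	/* Fabricated property contents: {index, value} pairs */
	uint8_t vec[] = { DEMO_IDX_MMU_SUPPORT, 0xc0,
			  DEMO_IDX_RADIX_GTSE,  DEMO_FEAT_GTSE };
	int i;

	for (i = 0; i < (int)sizeof(vec); i += 2) {
		uint8_t index = vec[i], val = vec[i + 1];

		printf("%d: index = 0x%x val = 0x%x\n", i / 2, index, val);
		if (index == DEMO_IDX_RADIX_GTSE && (val & DEMO_FEAT_GTSE))
			printf("Radix - GTSE supported\n");
	}
	return 0;
}
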
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index adf2084f214b..9cfaa8b69b5f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
info->line_size = lsize;
info->block_size = bsize;
info->log_block_size = __ilog2(bsize);
- info->blocks_per_page = PAGE_SIZE / bsize;
+ if (bsize)
+ info->blocks_per_page = PAGE_SIZE / bsize;
+ else
+ info->blocks_per_page = 0;
if (sets == 0)
info->assoc = 0xffff;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3158fb16de3..8c68145ba1bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
- if (pte_write(pte))
+ if (__pte_write(pte))
write_ok = 1;
}
local_irq_restore(flags);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6fca970373ee..ce6f2121fffe 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
}
pte = kvmppc_read_update_linux_pte(ptep, writing);
if (pte_present(pte) && !pte_protnone(pte)) {
- if (writing && !pte_write(pte))
+ if (writing && !__pte_write(pte))
/* make the actual HPTE be read-only */
ptel = hpte_make_readonly(ptel);
is_ci = pte_ci(pte);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 0e649d72fe8d..2b5e09020cfe 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -20,6 +20,7 @@ obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
obj64-$(CONFIG_SMP) += locks.o
obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
+obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
obj-y += checksum_$(BITS).o checksum_wrappers.o
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 846dba2c6360..9c542ec70c5b 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto instr_done;
case LARX:
- if (regs->msr & MSR_LE)
- return 0;
if (op.ea & (size - 1))
break; /* can't handle misaligned */
if (!address_ok(regs, op.ea, size))
@@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto ldst_done;
case STCX:
- if (regs->msr & MSR_LE)
- return 0;
if (op.ea & (size - 1))
break; /* can't handle misaligned */
if (!address_ok(regs, op.ea, size))
@@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto ldst_done;
case LOAD:
- if (regs->msr & MSR_LE)
- return 0;
err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
if (!err) {
if (op.type & SIGNEXT)
@@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef CONFIG_PPC_FPU
case LOAD_FP:
- if (regs->msr & MSR_LE)
- return 0;
if (size == 4)
err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
else
@@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#endif
#ifdef CONFIG_ALTIVEC
case LOAD_VMX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
goto ldst_done;
#endif
#ifdef CONFIG_VSX
case LOAD_VSX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
goto ldst_done;
#endif
@@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto instr_done;
case STORE:
- if (regs->msr & MSR_LE)
- return 0;
if ((op.type & UPDATE) && size == sizeof(long) &&
op.reg == 1 && op.update_reg == 1 &&
!(regs->msr & MSR_PR) &&
@@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef CONFIG_PPC_FPU
case STORE_FP:
- if (regs->msr & MSR_LE)
- return 0;
if (size == 4)
err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
else
@@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#endif
#ifdef CONFIG_ALTIVEC
case STORE_VMX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
goto ldst_done;
#endif
#ifdef CONFIG_VSX
case STORE_VSX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
goto ldst_done;
#endif
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
new file mode 100644
index 000000000000..2534c1447554
--- /dev/null
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -0,0 +1,434 @@
+/*
+ * Simple sanity test for emulate_step load/store instructions.
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "emulate_step_test: " fmt
+
+#include <linux/ptrace.h>
+#include <asm/sstep.h>
+#include <asm/ppc-opcode.h>
+
+#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
+
+/*
+ * Defined with TEST_ prefix so it does not conflict with other
+ * definitions.
+ */
+#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ((i) & 0xfffc))
+#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b) | \
+ __PPC_EH(eh))
+#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
+#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
+
+
+static void __init init_pt_regs(struct pt_regs *regs)
+{
+ static unsigned long msr;
+ static bool msr_cached;
+
+ memset(regs, 0, sizeof(struct pt_regs));
+
+ if (likely(msr_cached)) {
+ regs->msr = msr;
+ return;
+ }
+
+ asm volatile("mfmsr %0" : "=r"(regs->msr));
+
+ regs->msr |= MSR_FP;
+ regs->msr |= MSR_VEC;
+ regs->msr |= MSR_VSX;
+
+ msr = regs->msr;
+ msr_cached = true;
+}
+
+static void __init show_result(char *ins, char *result)
+{
+ pr_info("%-14s : %s\n", ins, result);
+}
+
+static void __init test_ld(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x23;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) &a;
+
+ /* ld r5, 0(r3) */
+ stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
+
+ if (stepped == 1 && regs.gpr[5] == a)
+ show_result("ld", "PASS");
+ else
+ show_result("ld", "FAIL");
+}
+
+static void __init test_lwz(void)
+{
+ struct pt_regs regs;
+ unsigned int a = 0x4545;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) &a;
+
+ /* lwz r5, 0(r3) */
+ stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
+
+ if (stepped == 1 && regs.gpr[5] == a)
+ show_result("lwz", "PASS");
+ else
+ show_result("lwz", "FAIL");
+}
+
+static void __init test_lwzx(void)
+{
+ struct pt_regs regs;
+ unsigned int a[3] = {0x0, 0x0, 0x1234};
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) a;
+ regs.gpr[4] = 8;
+ regs.gpr[5] = 0x8765;
+
+ /* lwzx r5, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
+ if (stepped == 1 && regs.gpr[5] == a[2])
+ show_result("lwzx", "PASS");
+ else
+ show_result("lwzx", "FAIL");
+}
+
+static void __init test_std(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x1234;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) &a;
+ regs.gpr[5] = 0x5678;
+
+ /* std r5, 0(r3) */
+ stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
+	if (stepped == 1 && regs.gpr[5] == a)
+ show_result("std", "PASS");
+ else
+ show_result("std", "FAIL");
+}
+
+static void __init test_ldarx_stdcx(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x1234;
+ int stepped = -1;
+ unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */
+
+ init_pt_regs(&regs);
+ asm volatile("mfcr %0" : "=r"(regs.ccr));
+
+
+ /*** ldarx ***/
+
+ regs.gpr[3] = (unsigned long) &a;
+ regs.gpr[4] = 0;
+ regs.gpr[5] = 0x5678;
+
+ /* ldarx r5, r3, r4, 0 */
+ stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));
+
+ /*
+	 * Don't touch 'a' here. Touching 'a' can cause a load/store
+	 * of 'a', which results in failure of the subsequent stdcx.
+	 * Instead, use a hardcoded value for comparison.
+ */
+ if (stepped <= 0 || regs.gpr[5] != 0x1234) {
+ show_result("ldarx / stdcx.", "FAIL (ldarx)");
+ return;
+ }
+
+
+ /*** stdcx. ***/
+
+ regs.gpr[5] = 0x9ABC;
+
+ /* stdcx. r5, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));
+
+ /*
+	 * Two possible scenarios indicate successful emulation
+	 * of stdcx.:
+	 * 1. The reservation is active and the store is performed. In
+	 *    this case the cr0.eq bit will be set to 1.
+	 * 2. The reservation is not active and the store is not
+	 *    performed. In this case the cr0.eq bit will be set to 0.
+ */
+ if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
+ || (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
+ show_result("ldarx / stdcx.", "PASS");
+ else
+ show_result("ldarx / stdcx.", "FAIL (stdcx.)");
+}
+
+#ifdef CONFIG_PPC_FPU
+static void __init test_lfsx_stfsx(void)
+{
+ struct pt_regs regs;
+ union {
+ float a;
+ int b;
+ } c;
+ int cached_b;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lfsx ***/
+
+ c.a = 123.45;
+ cached_b = c.b;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lfsx frt10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
+
+ if (stepped == 1)
+ show_result("lfsx", "PASS");
+ else
+ show_result("lfsx", "FAIL");
+
+
+ /*** stfsx ***/
+
+ c.a = 678.91;
+
+ /* stfsx frs10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
+
+ if (stepped == 1 && c.b == cached_b)
+ show_result("stfsx", "PASS");
+ else
+ show_result("stfsx", "FAIL");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+ struct pt_regs regs;
+ union {
+ double a;
+ long b;
+ } c;
+ long cached_b;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lfdx ***/
+
+ c.a = 123456.78;
+ cached_b = c.b;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lfdx frt10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
+
+ if (stepped == 1)
+ show_result("lfdx", "PASS");
+ else
+ show_result("lfdx", "FAIL");
+
+
+ /*** stfdx ***/
+
+ c.a = 987654.32;
+
+ /* stfdx frs10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
+
+ if (stepped == 1 && c.b == cached_b)
+ show_result("stfdx", "PASS");
+ else
+ show_result("stfdx", "FAIL");
+}
+#else
+static void __init test_lfsx_stfsx(void)
+{
+ show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+ show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+ show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+ show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+static void __init test_lvx_stvx(void)
+{
+ struct pt_regs regs;
+ union {
+ vector128 a;
+ u32 b[4];
+ } c;
+ u32 cached_b[4];
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lvx ***/
+
+ cached_b[0] = c.b[0] = 923745;
+ cached_b[1] = c.b[1] = 2139478;
+ cached_b[2] = c.b[2] = 9012;
+ cached_b[3] = c.b[3] = 982134;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lvx vrt10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
+
+ if (stepped == 1)
+ show_result("lvx", "PASS");
+ else
+ show_result("lvx", "FAIL");
+
+
+ /*** stvx ***/
+
+ c.b[0] = 4987513;
+ c.b[1] = 84313948;
+ c.b[2] = 71;
+ c.b[3] = 498532;
+
+ /* stvx vrs10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
+
+ if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+ cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+ show_result("stvx", "PASS");
+ else
+ show_result("stvx", "FAIL");
+}
+#else
+static void __init test_lvx_stvx(void)
+{
+ show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
+ show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static void __init test_lxvd2x_stxvd2x(void)
+{
+ struct pt_regs regs;
+ union {
+ vector128 a;
+ u32 b[4];
+ } c;
+ u32 cached_b[4];
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lxvd2x ***/
+
+ cached_b[0] = c.b[0] = 18233;
+ cached_b[1] = c.b[1] = 34863571;
+ cached_b[2] = c.b[2] = 834;
+ cached_b[3] = c.b[3] = 6138911;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lxvd2x vsr39, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
+
+ if (stepped == 1)
+ show_result("lxvd2x", "PASS");
+ else
+ show_result("lxvd2x", "FAIL");
+
+
+ /*** stxvd2x ***/
+
+ c.b[0] = 21379463;
+ c.b[1] = 87;
+ c.b[2] = 374234;
+ c.b[3] = 4;
+
+ /* stxvd2x vsr39, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
+
+ if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+ cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+ show_result("stxvd2x", "PASS");
+ else
+ show_result("stxvd2x", "FAIL");
+}
+#else
+static void __init test_lxvd2x_stxvd2x(void)
+{
+ show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
+ show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
+static int __init test_emulate_step(void)
+{
+ test_ld();
+ test_lwz();
+ test_lwzx();
+ test_std();
+ test_ldarx_stdcx();
+ test_lfsx_stfsx();
+ test_lfdx_stfdx();
+ test_lvx_stvx();
+ test_lxvd2x_stxvd2x();
+
+ return 0;
+}
+late_initcall(test_emulate_step);
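
The TEST_* macros in the new selftest compose raw PowerPC instruction words from an opcode template plus register and immediate fields. A standalone sketch of how TEST_LD(5, 3, 0) — "ld r5, 0(r3)" — is assembled; the opcode and field shifts below mirror the kernel's ppc-opcode.h definitions and are reproduced here for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Mirrors of the kernel's ppc-opcode.h helpers (for illustration) */
#define PPC_INST_LD	0xe8000000
#define ___PPC_RT(t)	(((t) & 0x1f) << 21)
#define ___PPC_RA(a)	(((a) & 0x1f) << 16)
#define IMM_L(i)	((uintptr_t)(i) & 0xffff)

#define TEST_LD(r, base, i)	(PPC_INST_LD | ___PPC_RT(r) | \
				 ___PPC_RA(base) | IMM_L(i))

int main(void)
{
	/* ld r5, 0(r3) -> opcode | (5 << 21) | (3 << 16) = 0xe8a30000 */
	printf("ld r5, 0(r3) = 0x%08lx\n", (unsigned long)TEST_LD(5, 3, 0));
	return 0;
}
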
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6aa3b76aa0d6..9be992083d2a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -356,18 +356,42 @@ static void early_check_vec5(void)
unsigned long root, chosen;
int size;
const u8 *vec5;
+ u8 mmu_supported;
root = of_get_flat_dt_root();
chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
- if (chosen == -FDT_ERR_NOTFOUND)
+ if (chosen == -FDT_ERR_NOTFOUND) {
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
return;
+ }
vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
- if (!vec5)
+ if (!vec5) {
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
return;
- if (size <= OV5_INDX(OV5_MMU_RADIX_300) ||
- !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300)))
- /* Hypervisor doesn't support radix */
+ }
+ if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ return;
+ }
+
+ /* Check for supported configuration */
+ mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
+ OV5_FEAT(OV5_MMU_SUPPORT);
+ if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
+ /* Hypervisor only supports radix - check enabled && GTSE */
+ if (!early_radix_enabled()) {
+ pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
+ }
+ if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
+ OV5_FEAT(OV5_RADIX_GTSE))) {
+ pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
+ }
+ /* Do radix anyway - the hypervisor said we had to */
+ cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+ } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
+ /* Hypervisor only supports hash - disable radix */
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ }
}
void __init mmu_early_init_devtree(void)
@@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void)
* even though the ibm,architecture-vec-5 property created by
* skiboot doesn't have the necessary bits set.
*/
- if (early_radix_enabled() && !(mfmsr() & MSR_HV))
+ if (!(mfmsr() & MSR_HV))
early_check_vec5();
if (early_radix_enabled())
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 2a590a98e652..c28165d8970b 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
*/
register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+ "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
static void __init radix_init_partition_table(void)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 595dd718ea87..2ff13249f87a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
sdsync = POWER7P_MMCRA_SDAR_VALID;
else if (ppmu->flags & PPMU_ALT_SIPR)
sdsync = POWER6_MMCRA_SDSYNC;
+ else if (ppmu->flags & PPMU_NO_SIAR)
+ sdsync = MMCRA_SAMPLE_ENABLE;
else
sdsync = MMCRA_SDSYNC;
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index e79fb5fb817d..cd951fd231c4 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
return !(event & ~valid_mask);
}
-static u64 mmcra_sdar_mode(u64 event)
+static inline bool is_event_marked(u64 event)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
- return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+ if (event & EVENT_IS_MARKED)
+ return true;
+
+ return false;
+}
- return MMCRA_SDAR_MODE_TLB;
+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
+{
+ /*
+	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+	 * continuous sampling mode.
+	 *
+	 * In case of Power8:
+	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
+	 * sampling mode and will remain unchanged when setting MMCRA[63]
+	 * (marked events).
+	 *
+	 * In case of Power9:
+	 * Marked events, or a group that already has marked events:
+	 * MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates').
+	 * Non-marked events (for DD1):
+	 * MMCRA[SDAR_MODE] will be set to 0b01.
+	 * For the rest:
+	 * MMCRA[SDAR_MODE] will be set from the event code.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
+ *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
+ else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+ *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+ else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ *mmcra |= MMCRA_SDAR_MODE_TLB;
+ } else
+ *mmcra |= MMCRA_SDAR_MODE_TLB;
}
static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
value |= CNST_L1_QUAL_VAL(cache);
}
- if (event & EVENT_IS_MARKED) {
+ if (is_event_marked(event)) {
mask |= CNST_SAMPLE_MASK;
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
}
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
}
/* In continuous sampling mode, update SDAR on TLB miss */
- mmcra |= mmcra_sdar_mode(event[i]);
+ mmcra_sdar_mode(event[i], &mmcra);
if (event[i] & EVENT_IS_L1) {
cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
}
- if (event[i] & EVENT_IS_MARKED) {
+ if (is_event_marked(event[i])) {
mmcra |= MMCRA_SAMPLE_ENABLE;
val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index cf9bd8990159..899210f14ee4 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -246,6 +246,7 @@
#define MMCRA_THR_CMP_SHIFT 32
#define MMCRA_SDAR_MODE_SHIFT 42
#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_IFM_SHIFT 30
/* MMCR1 Threshold Compare bit constant for power9 */
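
Note that the new MMCRA_SDAR_MODE_NO_UPDATES constant is an inverted mask: AND-ing it clears the whole two-bit SDAR_MODE field (bits 43:42), while the other modes are OR-ed in. A minimal sketch of the bit manipulation, with the constants mirrored from isa207-common.h for illustration:

#include <stdio.h>
#include <stdint.h>

/* Mirrors of isa207-common.h (for illustration) */
#define MMCRA_SDAR_MODE_SHIFT		42
#define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_SDAR_MODE_NO_UPDATES	~(0x3ull << MMCRA_SDAR_MODE_SHIFT)

int main(void)
{
	uint64_t mmcra = 0;

	mmcra |= MMCRA_SDAR_MODE_TLB;		/* 0b01: update on TLB miss */
	printf("mmcra = 0x%016llx\n", (unsigned long long)mmcra);

	mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;	/* 0b00: no SDAR updates */
	printf("mmcra = 0x%016llx\n", (unsigned long long)mmcra);
	return 0;
}
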
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 6693f75e93d1..da8a0f7a035c 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -39,8 +39,8 @@ opal_tracepoint_refcount:
BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
- ld r12,opal_tracepoint_refcount@toc(r2); \
- cmpdi r12,0; \
+ ld r11,opal_tracepoint_refcount@toc(r2); \
+ cmpdi r11,0; \
bne- LABEL; \
1:
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 86d9fde93c17..e0f856bfbfe8 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
struct machine_check_event *evt)
{
int recovered = 0;
- uint64_t ea = get_mce_fault_addr(evt);
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
/* Platform corrected itself */
recovered = 1;
- } else if (ea && !is_kernel_addr(ea)) {
+ } else if (evt->severity == MCE_SEV_FATAL) {
+ /* Fatal machine check */
+ pr_err("Machine check interrupt is fatal\n");
+ recovered = 0;
+ } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
+ (user_mode(regs) && !is_global_init(current))) {
/*
- * Faulting address is not in kernel text. We should be fine.
- * We need to find which process uses this address.
	 * For now, kill the task if we have received the exception
	 * while in userspace.
	 *
	 * TODO: Queue up this address for hwpoisoning later.
*/
- if (user_mode(regs) && !is_global_init(current)) {
- _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
- recovered = 1;
- } else
- recovered = 0;
- } else if (user_mode(regs) && !is_global_init(current) &&
- evt->severity == MCE_SEV_ERROR_SYNC) {
- /*
- * If we have received a synchronous error when in userspace
- * kill the task.
- */
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
recovered = 1;
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6901a06da2f9..e36738291c32 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
- struct pci_bus *bus)
+ struct pci_bus *bus,
+ bool add_to_group)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
set_dma_offset(&dev->dev, pe->tce_bypass_base);
- iommu_add_device(&dev->dev);
+ if (add_to_group)
+ iommu_add_device(&dev->dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate,
+ add_to_group);
}
}
@@ -2191,7 +2194,7 @@ found:
set_iommu_table_base(&pe->pdev->dev, tbl);
iommu_add_device(&pe->pdev->dev);
} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
return;
fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_set_bypass(pe, false);
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+ if (pe->pbus)
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
pnv_ioda2_table_free(tbl);
}
@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
table_group);
pnv_pci_ioda2_setup_default_config(pe);
+ if (pe->pbus)
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
}
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
+ if ((level_shift - 3) * levels + page_shift >= 60)
+ return -EINVAL;
+
/* Allocate TCE table */
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
if (pe->flags & PNV_IODA_PE_DEV)
iommu_add_device(&pe->pdev->dev);
else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
}
#ifdef CONFIG_PCI_MSI
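
The new bounds check in pnv_pci_ioda2_table_alloc_pages() rejects DMA windows whose indirect TCE levels would address 2^60 bytes or more: each level of 2^level_shift bytes holds 2^(level_shift - 3) eight-byte TCEs, so the window spans levels * (level_shift - 3) + page_shift bits of address space. A back-of-the-envelope sketch of that arithmetic — the example shifts are illustrative only:

#include <stdio.h>

/* Window size in address bits for a multi-level TCE table:
 * each level of 2^level_shift bytes holds 2^(level_shift - 3)
 * eight-byte TCEs, and the leaf maps 2^page_shift byte pages. */
static int window_bits(int level_shift, int levels, int page_shift)
{
	return (level_shift - 3) * levels + page_shift;
}

int main(void)
{
	/* e.g. 64K table pages (shift 16), 5 levels, 64K IOMMU pages */
	int bits = window_bits(16, 5, 16);

	printf("window = 2^%d bytes -> %s\n", bits,
	       bits >= 60 ? "rejected (-EINVAL)" : "allowed");
	return 0;
}
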
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 251060cf1713..8b1fe895daa3 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
- mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
+
+ if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+ mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}
void radix_init_pseries(void)
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
index f9760ccf4032..3696ea6c4826 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -116,13 +116,13 @@ dt_offset:
.data
.balign 8
-.globl sha256_digest
-sha256_digest:
+.globl purgatory_sha256_digest
+purgatory_sha256_digest:
.skip 32
- .size sha256_digest, . - sha256_digest
+ .size purgatory_sha256_digest, . - purgatory_sha256_digest
.balign 8
-.globl sha_regions
-sha_regions:
+.globl purgatory_sha_regions
+purgatory_sha_regions:
.skip 8 * 2 * 16
- .size sha_regions, . - sha_regions
+ .size purgatory_sha_regions, . - purgatory_sha_regions
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ada29eaed6e2..f523ac883150 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -274,7 +274,9 @@ failed:
if (bank->disk->major > 0)
unregister_blkdev(bank->disk->major,
bank->disk->disk_name);
- del_gendisk(bank->disk);
+ if (bank->disk->flags & GENHD_FL_UP)
+ del_gendisk(bank->disk);
+ put_disk(bank->disk);
}
device->dev.platform_data = NULL;
if (bank->io_addr != 0)
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
device_remove_file(&device->dev, &dev_attr_ecc);
free_irq(bank->irq_id, device);
del_gendisk(bank->disk);
+ put_disk(bank->disk);
iounmap((void __iomem *) bank->io_addr);
kfree(bank);
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index f9670eabfcfa..b53f80f0b4d8 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
static void icp_opal_set_cpu_priority(unsigned char cppr)
{
+ /*
+	 * Here be dragons. The caller has asked to allow only IPIs and not
+	 * external interrupts. But OPAL XIVE doesn't support that. So instead
+	 * of allowing no interrupts, allow all of them. That's still not
+	 * right, but currently the only caller that does this is
+	 * xics_migrate_irqs_away(), and it works in that case.
+ */
+ if (cppr >= DEFAULT_PRIORITY)
+ cppr = LOWEST_PRIORITY;
+
xics_set_base_cppr(cppr);
opal_int_set_cppr(cppr);
iosync();
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 69d858e51ac7..23efe4e42172 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/delay.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
/* Remove ourselves from the global interrupt queue */
xics_set_cpu_giq(xics_default_distrib_server, 0);
- /* Allow IPIs again... */
- icp_ops->set_priority(DEFAULT_PRIORITY);
-
for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+
+	/* Allow "sufficient" time to drop any in-flight IRQs */
+ mdelay(5);
+
+ /*
+	 * Allow IPIs again. This is done at the very end, after migrating
+	 * all interrupts; the expectation is that beyond this point we will
+	 * only be woken up by an IPI, but leave externals masked just to be
+	 * safe. If we're using icp-opal this may actually allow all
+	 * interrupts anyway, but that should be OK.
+ */
+ icp_ops->set_priority(DEFAULT_PRIORITY);
+
}
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 143b1e00b818..4b176fe83da4 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index f05d2d6e1087..0de46cc397f6 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 2358bf33c5ef..e167557b434c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index d69ea495c4d7..716b17238599 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (k < n) {
- if (__ctr_paes_set_key(ctx) != 0)
+ if (__ctr_paes_set_key(ctx) != 0) {
+ if (locked)
+ spin_unlock(&ctrblk_lock);
return blkcipher_walk_done(desc, walk, -EIO);
+ }
}
}
if (locked)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 68bfd09f1b02..97189dbaf34b 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index d1c407ddf703..9072bf63a846 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -8,31 +8,27 @@
#define _S390_CPUTIME_H
#include <linux/types.h>
-#include <asm/div64.h>
+#include <asm/timex.h>
#define CPUTIME_PER_USEC 4096ULL
#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
-typedef unsigned long long __nocast cputime_t;
-typedef unsigned long long __nocast cputime64_t;
-
#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
-static inline unsigned long __div(unsigned long long n, unsigned long base)
-{
- return n / base;
-}
-
/*
- * Convert cputime to microseconds and back.
+ * Convert cputime to microseconds.
*/
-static inline unsigned int cputime_to_usecs(const cputime_t cputime)
+static inline u64 cputime_to_usecs(const u64 cputime)
{
- return (__force unsigned long long) cputime >> 12;
+ return cputime >> 12;
}
+/*
+ * Convert cputime to nanoseconds.
+ */
+#define cputime_to_nsecs(cputime) tod_to_ns(cputime)
u64 arch_cpu_idle_time(int cpu);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7ed1972b1920..93e37b12e882 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -24,6 +24,7 @@
* the S390 page table tree.
*/
#ifndef __ASSEMBLY__
+#include <asm-generic/5level-fixup.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 354344dcc198..118535123f34 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void)
* ns = (todval * 125) >> 9;
*
* In order to avoid an overflow with the multiplication we can rewrite this.
- * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
+ * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits)
* we end up with
*
- * ns = ((2^32 * th + tl) * 125 ) >> 9;
- * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
+ * ns = ((2^9 * th + tl) * 125 ) >> 9;
+ * -> ns = (th * 125) + ((tl * 125) >> 9);
*
*/
static inline unsigned long long tod_to_ns(unsigned long long todval)
{
- unsigned long long ns;
-
- ns = ((todval >> 32) << 23) * 125;
- ns += ((todval & 0xffffffff) * 125) >> 9;
- return ns;
+ return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}
#endif
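
The rewritten tod_to_ns() splits todval at bit 9 rather than bit 32, so the high part keeps 55 significant bits and the multiplication by 125 cannot overflow 64 bits for any real TOD value. A quick check of the identity against 128-bit reference arithmetic (uses the GCC/Clang __int128 extension, illustrative):

#include <stdio.h>

static unsigned long long tod_to_ns(unsigned long long todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
	unsigned long long samples[] = { 0, 1, 511, 512, 0x123456789abcdefULL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long long v = samples[i];
		/* Reference: ns = (todval * 125) >> 9, computed in 128 bits */
		unsigned long long ref =
			(unsigned long long)(((unsigned __int128)v * 125) >> 9);

		printf("%#llx: %llu %s\n", v, tod_to_ns(v),
		       tod_to_ns(v) == ref ? "ok" : "MISMATCH");
	}
	return 0;
}
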
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 4384bc797a54..152de9b796e1 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -313,7 +313,9 @@
#define __NR_copy_file_range 375
#define __NR_preadv2 376
#define __NR_pwritev2 377
-#define NR_syscalls 378
+/* Number 378 is reserved for guarded storage */
+#define __NR_statx 379
+#define NR_syscalls 380
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index ae2cda5eee5a..e89cc2e71db1 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dff2152350a7..6a7d737d514c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -490,7 +490,7 @@ ENTRY(pgm_check_handler)
jnz .Lpgm_svcper # -> single stepped svc
1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 3f
+ j 4f
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
lgr %r14,%r12
@@ -499,8 +499,8 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 3f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-3: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stg %r10,__THREAD_last_break(%r14)
+3: stg %r10,__THREAD_last_break(%r14)
+4: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -509,14 +509,14 @@ ENTRY(pgm_check_handler)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 4f
+ jz 5f
tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-4: REENABLE_IRQS
+5: REENABLE_IRQS
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index b67dafb7b7cf..e545ffe5155a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,6 +564,8 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
+ if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
+ diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
diag308(DIAG308_LOAD_CLEAR, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 20cd339e11ae..f29e41c5e2ec 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -124,7 +124,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
p->thread.user_timer = 0;
+ p->thread.guest_timer = 0;
p->thread.system_timer = 0;
+ p->thread.hardirq_timer = 0;
+ p->thread.softirq_timer = 0;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9b59e6212d8f..2659b5cfeddb 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2)
SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
+NI_SYSCALL
+SYSCALL(sys_statx,compat_sys_statx)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c14fc9029912..072d84ba42a3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime)
}
static void account_system_index_scaled(struct task_struct *p,
- cputime_t cputime, cputime_t scaled,
+ u64 cputime, u64 scaled,
enum cpu_usage_stat index)
{
p->stimescaled += cputime_to_nsecs(scaled);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b48dc5f1900b..463e5ef02304 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
spinlock_t *ptl;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
pgste_t pgste;
pte_t *ptep;
pte_t pte;
bool dirty;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (!pud)
+ return false;
+ pmd = pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ return false;
+ /* We can't run guests backed by huge pages, but userspace can
+ * still set them up and then try to migrate them without any
+ * migration support.
+ */
+ if (pmd_large(*pmd))
+ return true;
+
+ ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (unlikely(!ptep))
return false;
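
The s390 hunk above replaces get_locked_pte() with an explicit walk so a huge mapping can be detected before a PTE is mapped. A minimal sketch of the same descent, assuming only the generic page-table accessors; the function name is hypothetical and this is not the exact kernel routine:

#include <linux/mm.h>

/* Walk mm's page tables down to the PTE level for addr, reporting
 * "handled" when a huge PMD is found (no PTE level exists below it).
 * pud_alloc()/pmd_alloc() may allocate and return NULL on failure. */
static bool walk_to_pte_or_huge(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	if (pmd_large(*pmd))		/* huge mapping: stop here */
		return true;
	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		return false;
	/* the real code inspects and updates the PTE at this point */
	pte_unmap_unlock(ptep, ptl);
	return true;
}
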
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 0553e5cd5985..46ff8fd678a7 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -2,6 +2,7 @@
#define _ASM_SCORE_PGTABLE_H
#include <linux/const.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/fixmap.h>
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index e359ec675869..12daf45369b4 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -24,6 +24,7 @@
*/
#include <linux/extable.h>
+#include <linux/ptrace.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c
index ec871355fc2d..6736a3ad6286 100644
--- a/arch/score/mm/extable.c
+++ b/arch/score/mm/extable.c
@@ -24,6 +24,8 @@
*/
#include <linux/extable.h>
+#include <linux/ptrace.h>
+#include <asm/extable.h>
int fixup_exception(struct pt_regs *regs)
{
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c
index 340fd40b381d..9c292c27e0d7 100644
--- a/arch/sh/boards/mach-cayman/setup.c
+++ b/arch/sh/boards/mach-cayman/setup.c
@@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void)
SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
-#ifdef CONFIG_IDE
/*
* Only IDE1 exists on the Cayman
*/
@@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void)
SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
-#endif
/* Exit the configuration state */
outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
index 19bd89db17e7..f75cf4387257 100644
--- a/arch/sh/include/asm/pgtable-2level.h
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -1,6 +1,7 @@
#ifndef __ASM_SH_PGTABLE_2LEVEL_H
#define __ASM_SH_PGTABLE_2LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/*
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 249a985d9648..9b1e776eca31 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -1,6 +1,7 @@
#ifndef __ASM_SH_PGTABLE_3LEVEL_H
#define __ASM_SH_PGTABLE_3LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/*
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 56e49c8f770d..8a598528ec1f 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -12,6 +12,7 @@
* the SpitFire page tables.
*/
+#include <asm-generic/5level-fixup.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index d26a42279036..5f8c615cb5e9 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
#define MAXMEM (_VMALLOC_START - PAGE_OFFSET)
/* We have no pmd or pud since we are strictly a two-level page table */
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline int pud_huge_page(pud_t pud) { return 0; }
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index e96cec52f6d8..96fe58b45118 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -59,6 +59,7 @@
#ifndef __ASSEMBLY__
/* We have no pud since we are a three-level page table. */
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/*
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index cfbe59752469..179c0ea87a0c 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -8,6 +8,7 @@
#ifndef __UM_PGTABLE_2LEVEL_H
#define __UM_PGTABLE_2LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index bae8523a162f..c4d876dfb9ac 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -7,6 +7,7 @@
#ifndef __UM_PGTABLE_3LEVEL_H
#define __UM_PGTABLE_3LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 818d0f5598e3..a4f2bef37e70 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -12,6 +12,7 @@
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 7ef4a099defc..6205d3b81e6d 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -176,6 +176,7 @@ CONFIG_E1000E=y
CONFIG_SKY2=y
CONFIG_FORCEDETH=y
CONFIG_8139TOO=y
+CONFIG_R8169=y
CONFIG_FDDI=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index afb222b63cae..c84584bb9402 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -604,7 +604,7 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
return &amd_f15_PMC20;
}
case AMD_EVENT_NB:
- /* moved to perf_event_amd_uncore.c */
+ /* moved to uncore.c */
return &emptyconstraint;
default:
return &emptyconstraint;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 349d4d17aa7f..2aa1ad194db2 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
static void refresh_pce(void *ignored)
{
- if (current->mm)
- load_mm_cr4(current->mm);
+ if (current->active_mm)
+ load_mm_cr4(current->active_mm);
}
static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
+ /*
+ * This function relies on not being called concurrently in two
+ * tasks in the same mm. Otherwise one task could observe
+ * perf_rdpmc_allowed > 1 and return all the way back to
+ * userspace with CR4.PCE clear while another task is still
+ * doing on_each_cpu_mask() to propagate CR4.PCE.
+ *
+ * For now, this can't happen because all callers hold mmap_sem
+ * for write. If this changes, we'll need a different solution.
+ */
+ lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
}
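
The refresh_pce()/x86_pmu_event_mapped() pair above keeps CR4.PCE in step with a per-mm reference count: the first mapping enables RDPMC on every CPU running the mm, the last unmapping disables it. A condensed kernel-style sketch of that pattern; rdpmc_refcount, set_pce(), event_mapped() and event_unmapped() are stand-in names, not the real symbols:

#include <linux/atomic.h>
#include <linux/smp.h>

static atomic_t rdpmc_refcount;	/* models mm->context.perf_rdpmc_allowed */

static void set_pce(void *info)	/* runs on each CPU via IPI */
{
	/* the real code does load_mm_cr4(current->active_mm) here */
}

static void event_mapped(void)
{
	/*
	 * 0 -> 1 transition: first mapping, enable everywhere. As the
	 * comment in the patch notes, callers must hold mmap_sem for
	 * write or two racers could leave CR4.PCE clear on some CPUs.
	 */
	if (atomic_inc_return(&rdpmc_refcount) == 1)
		on_each_cpu(set_pce, NULL, 1);
}

static void event_unmapped(void)
{
	if (atomic_dec_and_test(&rdpmc_refcount))	/* 1 -> 0: last one */
		on_each_cpu(set_pce, NULL, 1);
}
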
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index aff4b5b69d40..238ae3248ba5 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -1,5 +1,5 @@
/*
- * perf_event_intel_cstate.c: support cstate residency counters
+ * Support cstate residency counters
*
* Copyright (C) 2015, Intel Corp.
* Author: Kan Liang (kan.liang@intel.com)
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 22054ca49026..9d05c7e67f60 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -1,5 +1,5 @@
/*
- * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters
+ * Support Intel RAPL energy consumption counters
* Copyright (C) 2013 Google, Inc., Stephane Eranian
*
* Intel RAPL interface is specified in the IA-32 Manual Vol3b
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index ad986c1e29bc..df5989f27b1b 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -360,7 +360,7 @@ extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
-/* perf_event_intel_uncore_snb.c */
+/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
@@ -371,7 +371,7 @@ void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);
-/* perf_event_intel_uncore_snbep.c */
+/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
@@ -385,5 +385,5 @@ void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
-/* perf_event_intel_uncore_nhmex.c */
+/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index db64baf0e500..8bef70e7f3cc 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -158,13 +158,13 @@ void hyperv_init(void)
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
return;
}
+register_msr_cs:
#endif
/*
* For 32 bit guests just use the MSR based mechanism for reading
* the partition counter.
*/
-register_msr_cs:
hyperv_cs = &hyperv_cs_msr;
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4e7772387c6e..b04bb6dfed7f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -289,7 +289,8 @@
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */
+#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 72277b1028a5..50d35e3185f5 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
*(tmp + 1) = 0;
}
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
- defined(CONFIG_PARAVIRT))
static inline void native_pud_clear(pud_t *pudp)
{
}
-#endif
static inline void pud_clear(pud_t *pudp)
{
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1cfb36b8c024..585ee0d42d18 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8b4de22d6429..62484333673d 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
}
#if CONFIG_PGTABLE_LEVELS > 3
+#include <asm-generic/5level-fixup.h>
+
typedef struct { pudval_t pud; } pud_t;
static inline pud_t native_make_pud(pmdval_t val)
@@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud)
return pud.pud;
}
#else
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
static inline pudval_t native_pud_val(pud_t pud)
@@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
return pmd.pmd;
}
#else
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline pmdval_t native_pmd_val(pmd_t pmd)
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 34684adb6899..b3b09b98896d 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
static inline
bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
+ /*
+ * "Allocated" pkeys are those that have been returned
+ * from pkey_alloc(). pkey 0 is special, and never
+ * returned from pkey_alloc().
+ */
+ if (pkey <= 0)
+ return false;
+ if (pkey >= arch_max_pkey())
+ return false;
return mm_pkey_allocation_map(mm) & (1U << pkey);
}
@@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm)
static inline
int mm_pkey_free(struct mm_struct *mm, int pkey)
{
- /*
- * pkey 0 is special, always allocated and can never
- * be freed.
- */
- if (!pkey)
- return -EINVAL;
if (!mm_pkey_is_allocated(mm, pkey))
return -EINVAL;
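
The pkeys hunk above folds the pkey-0 special case into mm_pkey_is_allocated(), so every path that consults the allocation map rejects key 0 and out-of-range keys in one place. The userspace-visible contract is that pkey_alloc() never returns 0 and pkey_free(0) fails; a small sketch using the glibc wrappers (present since glibc 2.27, on hardware with protection keys):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);

	if (pkey < 0) {				/* no pkey support here */
		perror("pkey_alloc");
		return 1;
	}
	printf("allocated pkey %d (never 0)\n", pkey);

	if (pkey_free(0) < 0)			/* key 0 is not freeable */
		printf("pkey_free(0): %s (expected)\n", strerror(errno));

	pkey_free(pkey);
	return 0;
}
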
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h
new file mode 100644
index 000000000000..d7da2729903d
--- /dev/null
+++ b/arch/x86/include/asm/purgatory.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_PURGATORY_H
+#define _ASM_X86_PURGATORY_H
+
+#ifndef __ASSEMBLY__
+#include <linux/purgatory.h>
+
+extern void purgatory(void);
+/*
+ * These forward declarations serve two purposes:
+ *
+ * 1) Make sparse happy when checking arch/purgatory
+ * 2) Document that these are required to be global so the symbol
+ * lookup in kexec works
+ */
+extern unsigned long purgatory_backup_dest;
+extern unsigned long purgatory_backup_src;
+extern unsigned long purgatory_backup_sz;
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PURGATORY_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6fa85944af83..fc5abff9b7fd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
static inline void __flush_tlb_all(void)
{
- if (static_cpu_has(X86_FEATURE_PGE))
+ if (boot_cpu_has(X86_FEATURE_PGE))
__flush_tlb_global();
else
__flush_tlb();
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 5138dacf8bb8..07244ea16765 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -58,7 +58,7 @@ struct setup_header {
__u32 header;
__u16 version;
__u32 realmode_swtch;
- __u16 start_sys;
+ __u16 start_sys_seg;
__u16 kernel_version;
__u8 type_of_loader;
__u8 loadflags;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ae32838cac5f..b2879cc23db4 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
return -EINVAL;
}
+ if (!enabled) {
+ ++disabled_cpus;
+ return -EINVAL;
+ }
+
if (boot_cpu_physical_apicid != -1U)
ver = boot_cpu_apic_version;
- cpu = __generic_processor_info(id, ver, enabled);
+ cpu = generic_processor_info(id, ver);
if (cpu >= 0)
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4261b3282ad9..8ccb7ef512e0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1610,24 +1610,15 @@ static inline void try_to_enable_x2apic(int remap_mode) { }
static inline void __x2apic_enable(void) { }
#endif /* !CONFIG_X86_X2APIC */
-static int __init try_to_enable_IR(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- if (!x2apic_enabled() && skip_ioapic_setup) {
- pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
- return -1;
- }
-#endif
- return irq_remapping_enable();
-}
-
void __init enable_IR_x2apic(void)
{
unsigned long flags;
int ret, ir_stat;
- if (skip_ioapic_setup)
+ if (skip_ioapic_setup) {
+ pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
return;
+ }
ir_stat = irq_remapping_prepare();
if (ir_stat < 0 && !x2apic_supported())
@@ -1645,7 +1636,7 @@ void __init enable_IR_x2apic(void)
/* If irq_remapping_prepare() succeeded, try to enable it */
if (ir_stat >= 0)
- ir_stat = try_to_enable_IR();
+ ir_stat = irq_remapping_enable();
/* ir_stat contains the remap mode or an error code */
try_to_enable_x2apic(ir_stat);
@@ -2062,17 +2053,17 @@ static int allocate_logical_cpuid(int apicid)
/* Allocate a new cpuid. */
if (nr_logical_cpuids >= nr_cpu_ids) {
- WARN_ONCE(1, "Only %d processors supported."
+ WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. "
"Processor %d/0x%x and the rest are ignored.\n",
- nr_cpu_ids - 1, nr_logical_cpuids, apicid);
- return -1;
+ nr_cpu_ids, nr_logical_cpuids, apicid);
+ return -EINVAL;
}
cpuid_to_apicid[nr_logical_cpuids] = apicid;
return nr_logical_cpuids++;
}
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2130,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;
- if (enabled) {
- pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
- "reached. Processor %d/0x%x ignored.\n",
- max, thiscpu, apicid);
- }
+ pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+ "reached. Processor %d/0x%x ignored.\n",
+ max, thiscpu, apicid);
disabled_cpus++;
return -EINVAL;
@@ -2186,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);
-
- if (enabled) {
- num_processors++;
- physid_set(apicid, phys_cpu_present_map);
- set_cpu_present(cpu, true);
- } else {
- disabled_cpus++;
- }
+ physid_set(apicid, phys_cpu_present_map);
+ set_cpu_present(cpu, true);
+ num_processors++;
return cpu;
}
-int generic_processor_info(int apicid, int version)
-{
- return __generic_processor_info(apicid, version, true);
-}
-
int hard_smp_processor_id(void)
{
return read_apic_id();
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 35a5d5dca2fa..c36140d788fe 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -556,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- if (check_tsc_unstable())
- clear_sched_clock_stable();
- } else {
- clear_sched_clock_stable();
}
/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index adc0ebd8bed0..43955ee6715b 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -105,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
-
- clear_sched_clock_stable();
}
static void init_centaur(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b11b38c3b0bd..58094a1f9e9d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c)
strcpy(c->x86_model_id, "386");
}
#endif
- clear_sched_clock_stable();
}
static const struct cpu_dev default_cpu = {
@@ -1077,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
*/
if (this_cpu->c_init)
this_cpu->c_init(c);
- else
- clear_sched_clock_stable();
/* Disable the PN if appropriate */
squash_the_stupid_serial_number(c);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 0a3bc19de017..a70fd61095f8 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -185,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
break;
}
- clear_sched_clock_stable();
}
static void init_cyrix(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fe0a615a051b..063197771b8d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -162,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- if (check_tsc_unstable())
- clear_sched_clock_stable();
- } else {
- clear_sched_clock_stable();
}
/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 0bbe0f3a039f..9ac2a5cdd9c2 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -28,7 +28,6 @@
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
-#include <linux/cpu.h>
#include <linux/task_work.h>
#include <uapi/linux/magic.h>
@@ -728,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
kernfs_unbreak_active_protection(kn);
- kernfs_put(kn);
+ kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 8457b4978668..d77d07ab310b 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -16,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
if (xlvl >= 0x80860001)
c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
}
-
- clear_sched_clock_stable();
}
static void init_transmeta(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 891f4dad7b2c..22403a28caf5 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -30,7 +30,6 @@
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
-#include <asm/timer.h>
#undef pr_fmt
#define pr_fmt(fmt) "vmware: " fmt
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8639bb2ae058..8f3d9cf26ff9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -535,7 +535,7 @@ static void run_sync(void)
{
int enable_irqs = irqs_disabled();
- /* We may be called with interrupts disbled (on bootup). */
+ /* We may be called with interrupts disabled (on bootup). */
if (enable_irqs)
local_irq_enable();
on_each_cpu(do_sync_core, NULL, 1);
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372f5dbb..b5785c197e53 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
+#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index dc6ba5bda9fc..89ff7af2de50 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
- disable_irq(hdev->irq);
+ disable_hardirq(hdev->irq);
irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
enable_irq(hdev->irq);
}
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index bdb83e431d89..38b64587b31b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -167,7 +167,7 @@ static int __init boot_params_kdebugfs_init(void)
struct dentry *dbp, *version, *data;
int error = -ENOMEM;
- dbp = debugfs_create_dir("boot_params", NULL);
+ dbp = debugfs_create_dir("boot_params", arch_debugfs_dir);
if (!dbp)
return -ENOMEM;
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index c6ee63f927ab..d688826e5736 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
#endif
/* Check whether the instruction can be boosted */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
/* Recover instruction if given address is probed */
extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
unsigned long addr);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6384eb754a58..993fa4fe4f68 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -167,12 +167,12 @@ NOKPROBE_SYMBOL(skip_prefixes);
* Returns non-zero if opcode is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode
*/
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
{
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
- if (search_exception_tables((unsigned long)opcodes))
+ if (search_exception_tables((unsigned long)addr))
return 0; /* Page fault may occur on this address. */
retry:
@@ -417,7 +417,7 @@ static int arch_copy_kprobe(struct kprobe *p)
* __copy_instruction can modify the displacement of the instruction,
 * but it doesn't affect the boostable check.
*/
- if (can_boost(p->ainsn.insn))
+ if (can_boost(p->ainsn.insn, p->addr))
p->ainsn.boostable = 0;
else
p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 3d1bee9d6a72..3e7c6e5a08ff 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
while (len < RELATIVEJUMP_SIZE) {
ret = __copy_instruction(dest + len, src + len);
- if (!ret || !can_boost(dest + len))
+ if (!ret || !can_boost(dest + len, src + len))
return -EINVAL;
len += ret;
}
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 307b1f4543de..857cdbd02867 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -194,19 +194,22 @@ static int arch_update_purgatory(struct kimage *image)
/* Setup copying of backup region */
if (image->type == KEXEC_TYPE_CRASH) {
- ret = kexec_purgatory_get_set_symbol(image, "backup_dest",
+ ret = kexec_purgatory_get_set_symbol(image,
+ "purgatory_backup_dest",
&image->arch.backup_load_addr,
sizeof(image->arch.backup_load_addr), 0);
if (ret)
return ret;
- ret = kexec_purgatory_get_set_symbol(image, "backup_src",
+ ret = kexec_purgatory_get_set_symbol(image,
+ "purgatory_backup_src",
&image->arch.backup_src_start,
sizeof(image->arch.backup_src_start), 0);
if (ret)
return ret;
- ret = kexec_purgatory_get_set_symbol(image, "backup_sz",
+ ret = kexec_purgatory_get_set_symbol(image,
+ "purgatory_backup_sz",
&image->arch.backup_src_sz,
sizeof(image->arch.backup_src_sz), 0);
if (ret)
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index f088ea4c66e7..a723ae9440ab 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
spin_lock_irqsave(&desc->lock, flags);
/*
- * most handlers of type NMI_UNKNOWN never return because
- * they just assume the NMI is theirs. Just a sanity check
- * to manage expectations
+ * Indicate if there are multiple registrations on the
+ * internal NMI handler call chains (SERR and IO_CHECK).
*/
- WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e244c19a2451..067f9813fd2c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -223,6 +223,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
+ { /* Handle problems with rebooting on ASUS EeeBook X205TA */
+ .callback = set_acpi_reboot,
+ .ident = "ASUS EeeBook X205TA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"),
+ },
+ },
+ { /* Handle problems with rebooting on ASUS EeeBook X205TAW */
+ .callback = set_acpi_reboot,
+ .ident = "ASUS EeeBook X205TAW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
+ },
+ },
/* Certec */
{ /* Handle problems with rebooting on Certec BPC600 */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46bcda4cb1c2..c73a7f9e881a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -327,9 +327,16 @@ unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
}
+
+static inline bool using_native_sched_clock(void)
+{
+ return pv_time_ops.sched_clock == native_sched_clock;
+}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
+
+static inline bool using_native_sched_clock(void) { return true; }
#endif
int check_tsc_unstable(void)
@@ -1112,8 +1119,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
{
if (tsc_unstable)
return;
+
tsc_unstable = 1;
- clear_sched_clock_stable();
+ if (using_native_sched_clock())
+ clear_sched_clock_stable();
disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to clocksource watchdog\n");
}
@@ -1135,18 +1144,20 @@ static struct clocksource clocksource_tsc = {
void mark_tsc_unstable(char *reason)
{
- if (!tsc_unstable) {
- tsc_unstable = 1;
+ if (tsc_unstable)
+ return;
+
+ tsc_unstable = 1;
+ if (using_native_sched_clock())
clear_sched_clock_stable();
- disable_sched_clock_irqtime();
- pr_info("Marking TSC unstable due to %s\n", reason);
- /* Change only the rating, when not registered */
- if (clocksource_tsc.mult)
- clocksource_mark_unstable(&clocksource_tsc);
- else {
- clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
- clocksource_tsc.rating = 0;
- }
+ disable_sched_clock_irqtime();
+ pr_info("Marking TSC unstable due to %s\n", reason);
+ /* Change only the rating, when not registered */
+ if (clocksource_tsc.mult) {
+ clocksource_mark_unstable(&clocksource_tsc);
+ } else {
+ clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
+ clocksource_tsc.rating = 0;
}
}
@@ -1322,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
* the refined calibration and directly register it as a clocksource.
*/
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
+ if (boot_cpu_has(X86_FEATURE_ART))
+ art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
return 0;
}
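
In the non-paravirt branch above, sched_clock() is not a wrapper around native_sched_clock() but a GCC symbol alias for it, which is why using_native_sched_clock() can simply return true there. A standalone sketch of the alias technique, with hypothetical function names:

#include <stdio.h>

unsigned long long impl_clock(void)	/* stand-in for native_sched_clock */
{
	return 42;
}

/* public_clock is a second name for the very same function: zero call
 * overhead, and comparing function pointers to impl_clock still works. */
unsigned long long public_clock(void)
	__attribute__((alias("impl_clock")));

int main(void)
{
	printf("%llu\n", public_clock());
	return 0;
}
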
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 478d15dbaee4..08339262b666 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
return sizeof(*regs);
}
+#ifdef CONFIG_X86_32
+#define GCC_REALIGN_WORDS 3
+#else
+#define GCC_REALIGN_WORDS 1
+#endif
+
static bool is_last_task_frame(struct unwind_state *state)
{
- unsigned long bp = (unsigned long)state->bp;
- unsigned long regs = (unsigned long)task_pt_regs(state->task);
+ unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
+ unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
/*
* We have to check for the last task frame at two different locations
* because gcc can occasionally decide to realign the stack pointer and
- * change the offset of the stack frame by a word in the prologue of a
- * function called by head/entry code.
+ * change the offset of the stack frame in the prologue of a function
+ * called by head/entry code. Examples:
+ *
+ * <start_secondary>:
+ * push %edi
+ * lea 0x8(%esp),%edi
+ * and $0xfffffff8,%esp
+ * pushl -0x4(%edi)
+ * push %ebp
+ * mov %esp,%ebp
+ *
+ * <x86_64_start_kernel>:
+ * lea 0x8(%rsp),%r10
+ * and $0xfffffffffffffff0,%rsp
+ * pushq -0x8(%r10)
+ * push %rbp
+ * mov %rsp,%rbp
+ *
+ * Note that after aligning the stack, it pushes a duplicate copy of
+ * the return address before pushing the frame pointer.
*/
- return bp == regs - FRAME_HEADER_SIZE ||
- bp == regs - FRAME_HEADER_SIZE - sizeof(long);
+ return (state->bp == last_bp ||
+ (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
}
/*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 283aa8601833..98e82ee1e699 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7258,9 +7258,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 zero = 0;
gpa_t vmptr;
- struct vmcs12 *vmcs12;
- struct page *page;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -7271,22 +7270,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vmx);
- page = nested_get_page(vcpu, vmptr);
- if (page == NULL) {
- /*
- * For accurate processor emulation, VMCLEAR beyond available
- * physical memory should do nothing at all. However, it is
- * possible that a nested vmx bug, not a guest hypervisor bug,
- * resulted in this case, so let's shut down before doing any
- * more damage:
- */
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- return 1;
- }
- vmcs12 = kmap(page);
- vmcs12->launch_state = 0;
- kunmap(page);
- nested_release_page(page);
+ kvm_vcpu_write_guest(vcpu,
+ vmptr + offsetof(struct vmcs12, launch_state),
+ &zero, sizeof(zero));
nested_free_vmcs02(vmx, vmptr);
@@ -9694,10 +9680,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
return false;
page = nested_get_page(vcpu, vmcs12->msr_bitmap);
- if (!page) {
- WARN_ON(1);
+ if (!page)
return false;
- }
msr_bitmap_l1 = (unsigned long *)kmap(page);
memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
@@ -11121,8 +11105,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
*/
static void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
- if (is_guest_mode(vcpu))
+ if (is_guest_mode(vcpu)) {
+ to_vmx(vcpu)->nested.nested_run_pending = 0;
nested_vmx_vmexit(vcpu, -1, 0, 0);
+ }
free_nested(to_vmx(vcpu));
}
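
The handle_vmclear() rewrite above stops kmap()ing the whole VMCS page and writes the single launch_state field straight into guest memory, addressed with offsetof(). The same partial-update idea in a standalone sketch, using a file as the backing store and a made-up struct name:

#include <stddef.h>
#include <stdio.h>

struct vmcs_like {
	unsigned int revision;
	unsigned int launch_state;
};

/* Zero one field of a struct stored in a file, located by offsetof(),
 * instead of reading and rewriting the whole record. */
static int clear_launch_state(FILE *f)
{
	unsigned int zero = 0;

	if (fseek(f, offsetof(struct vmcs_like, launch_state), SEEK_SET))
		return -1;
	return fwrite(&zero, sizeof(zero), 1, f) == 1 ? 0 : -1;
}

int main(void)
{
	FILE *f = tmpfile();
	struct vmcs_like v = { 1, 1 };

	fwrite(&v, sizeof(v), 1, f);
	if (clear_launch_state(f) == 0)
		puts("launch_state cleared");
	fclose(f);
	return 0;
}
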
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 99c7805a9693..1f3b6ef105cd 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
- int nr_start = *nr;
- pte_t *ptep;
+ int nr_start = *nr, ret = 0;
+ pte_t *ptep, *ptem;
- ptep = pte_offset_map(&pmd, addr);
+ /*
+ * Keep the original mapped PTE value (ptem) around since we
+ * might increment ptep off the end of the page when finishing
+ * our loop iteration.
+ */
+ ptem = ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
/* Similar to the PMD case, NUMA hinting must take slow path */
- if (pte_protnone(pte)) {
- pte_unmap(ptep);
- return 0;
- }
+ if (pte_protnone(pte))
+ break;
+
+ if (!pte_allows_gup(pte_val(pte), write))
+ break;
if (pte_devmap(pte)) {
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, pages);
- pte_unmap(ptep);
- return 0;
+ break;
}
- } else if (!pte_allows_gup(pte_val(pte), write) ||
- pte_special(pte)) {
- pte_unmap(ptep);
- return 0;
- }
+ } else if (pte_special(pte))
+ break;
+
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
@@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
- pte_unmap(ptep - 1);
+ if (addr == end)
+ ret = 1;
+ pte_unmap(ptem);
- return 1;
+ return ret;
}
static inline void get_head_page_multiple(struct page *page, int nr)
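
The gup_pte_range() rework above replaces three separate pte_unmap()-and-return exits with a single cleanup path keyed off the originally mapped pointer (ptem), while the cursor (ptep) may legally run one past the end. The same anchor-plus-cursor shape in a standalone sketch with hypothetical names:

#include <stdlib.h>

/* Consume a malloc'd array of nent (>= 1) entries; returns 1 if every
 * entry was accepted, 0 on early bailout.  Ownership of the array is
 * taken over: it is freed exactly once, via the saved anchor. */
int scan_all(int *array, int nent)
{
	int *cursor, *anchor;
	int ret = 0;

	anchor = cursor = array;	/* anchor is what we release */
	do {
		if (*cursor < 0)	/* analogue of a special PTE */
			break;
		/* ... consume *cursor ... */
	} while (cursor++, --nent);

	if (nent == 0)
		ret = 1;		/* reached the end */
	free(anchor);			/* never free(cursor): it may point
					 * one past the last element */
	return ret;
}
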
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8d63d7a104c3..4c90cfdc128b 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 5126dfd52b18..cd44ae727df7 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
* we might run off the end of the bounds table if we are on
* a 64-bit kernel and try to get 8 bytes.
*/
-int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
long __user *bd_entry_ptr)
{
u32 bd_entry_32;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 0cb52ae0a8f0..190e718694b1 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -735,6 +735,15 @@ void pcibios_disable_device (struct pci_dev *dev)
pcibios_disable_irq(dev);
}
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+void pcibios_release_device(struct pci_dev *dev)
+{
+ if (atomic_dec_return(&dev->enable_cnt) >= 0)
+ pcibios_disable_device(dev);
+
+}
+#endif
+
int pci_ext_cfg_avail(void)
{
if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index e1fb269c87af..292ab0364a89 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 1;
for_each_pci_msi_entry(msidesc, dev) {
- __pci_read_msi_msg(msidesc, &msg);
- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
- if (msg.data != XEN_PIRQ_MSI_DATA ||
- xen_irq_from_pirq(pirq) < 0) {
- pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0) {
- irq = -ENODEV;
- goto error;
- }
- xen_msi_compose_msg(dev, pirq, &msg);
- __pci_write_msi_msg(msidesc, &msg);
- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
- } else {
- dev_dbg(&dev->dev,
- "xen: msi already bound to pirq=%d\n", pirq);
+ pirq = xen_allocate_pirq_msi(dev, msidesc);
+ if (pirq < 0) {
+ irq = -ENODEV;
+ goto error;
}
+ xen_msi_compose_msg(dev, pirq, &msg);
+ __pci_write_msi_msg(msidesc, &msg);
+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index a7dbec4dce27..3dbde04febdc 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
new file mode 100644
index 000000000000..a6c3705a28ad
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
@@ -0,0 +1,82 @@
+/*
+ * Intel Merrifield power button support
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct resource mrfld_power_btn_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mrfld_power_btn_dev = {
+ .name = "msic_power_btn",
+ .id = PLATFORM_DEVID_NONE,
+ .num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
+ .resource = mrfld_power_btn_resources,
+};
+
+static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ if (code == SCU_DOWN) {
+ platform_device_unregister(&mrfld_power_btn_dev);
+ return 0;
+ }
+
+ return platform_device_register(&mrfld_power_btn_dev);
+}
+
+static struct notifier_block mrfld_power_btn_scu_notifier = {
+ .notifier_call = mrfld_power_btn_scu_status_change,
+};
+
+static int __init register_mrfld_power_btn(void)
+{
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return -ENODEV;
+
+ /*
+ * We need to be sure that the SCU IPC is ready before
+	 * the PMIC power button device can be registered:
+ */
+ intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
+
+ return 0;
+}
+arch_initcall(register_mrfld_power_btn);
+
+static void __init *mrfld_power_btn_platform_data(void *info)
+{
+ struct resource *res = mrfld_power_btn_resources;
+ struct sfi_device_table_entry *pentry = info;
+
+ res->start = res->end = pentry->irq;
+ return NULL;
+}
+
+static const struct devs_id mrfld_power_btn_dev_id __initconst = {
+ .name = "bcove_power_btn",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .msic = 1,
+ .get_platform_data = &mrfld_power_btn_platform_data,
+};
+
+sfi_device(mrfld_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 86edd1e941eb..9e304e2ea4f5 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -19,7 +19,7 @@
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
static struct platform_device wdt_dev = {
.name = "intel_mid_wdt",
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index e793fe509971..e42978d4deaf 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -17,16 +17,6 @@
#include "intel_mid_weak_decls.h"
-static void penwell_arch_setup(void);
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
- .arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
static unsigned long __init mfld_calibrate_tsc(void)
{
unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
static void __init penwell_arch_setup(void)
{
x86_platform.calibrate_tsc = mfld_calibrate_tsc;
- pm_power_off = mfld_power_off;
}
+static struct intel_mid_ops penwell_ops = {
+ .arch_setup = penwell_arch_setup,
+};
+
void *get_penwell_ops(void)
{
return &penwell_ops;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 766d4d3529a1..f25982cdff90 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1847,7 +1847,6 @@ static void pq_init(int node, int pnode)
ops.write_payload_first(pnode, first);
ops.write_payload_last(pnode, last);
- ops.write_g_sw_ack(pnode, 0xffffUL);
/* in effect, all msg_type's are set to MSG_NOOP */
memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 25e068ba3382..470edad96bb9 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -10,21 +10,19 @@
* Version 2. See the file COPYING for more details.
*/
+#include <linux/bug.h>
+#include <asm/purgatory.h>
+
#include "sha256.h"
#include "../boot/string.h"
-struct sha_region {
- unsigned long start;
- unsigned long len;
-};
-
-unsigned long backup_dest = 0;
-unsigned long backup_src = 0;
-unsigned long backup_sz = 0;
+unsigned long purgatory_backup_dest __section(.kexec-purgatory);
+unsigned long purgatory_backup_src __section(.kexec-purgatory);
+unsigned long purgatory_backup_sz __section(.kexec-purgatory);
-u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 };
+u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory);
-struct sha_region sha_regions[16] = {};
+struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory);
/*
 * On x86, the second kernel requires the first 640K of memory to boot. Copy
@@ -33,26 +31,28 @@ struct sha_region sha_regions[16] = {};
*/
static int copy_backup_region(void)
{
- if (backup_dest)
- memcpy((void *)backup_dest, (void *)backup_src, backup_sz);
-
+ if (purgatory_backup_dest) {
+ memcpy((void *)purgatory_backup_dest,
+ (void *)purgatory_backup_src, purgatory_backup_sz);
+ }
return 0;
}
-int verify_sha256_digest(void)
+static int verify_sha256_digest(void)
{
- struct sha_region *ptr, *end;
+ struct kexec_sha_region *ptr, *end;
u8 digest[SHA256_DIGEST_SIZE];
struct sha256_state sctx;
sha256_init(&sctx);
- end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])];
- for (ptr = sha_regions; ptr < end; ptr++)
+ end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
+
+ for (ptr = purgatory_sha_regions; ptr < end; ptr++)
sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
sha256_final(&sctx, digest);
- if (memcmp(digest, sha256_digest, sizeof(digest)))
+ if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
return 1;
return 0;
diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S
index fe3c91ba1bd0..dfae9b9e60b5 100644
--- a/arch/x86/purgatory/setup-x86_64.S
+++ b/arch/x86/purgatory/setup-x86_64.S
@@ -9,6 +9,7 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
+#include <asm/purgatory.h>
.text
.globl purgatory_start
diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h
index bd15a4127735..2867d9825a57 100644
--- a/arch/x86/purgatory/sha256.h
+++ b/arch/x86/purgatory/sha256.h
@@ -10,7 +10,6 @@
#ifndef SHA256_H
#define SHA256_H
-
#include <linux/types.h>
#include <crypto/sha.h>
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 8aa0e0d9cbb2..30dd5b2e4ad5 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/kmem_layout.h>
diff --git a/block/bio.c b/block/bio.c
index 5eec5e08417f..e75878f8b14a 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
bio_list_init(&punt);
bio_list_init(&nopunt);
- while ((bio = bio_list_pop(current->bio_list)))
+ while ((bio = bio_list_pop(&current->bio_list[0])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+ current->bio_list[0] = nopunt;
- *current->bio_list = nopunt;
+ bio_list_init(&nopunt);
+ while ((bio = bio_list_pop(&current->bio_list[1])))
+ bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+ current->bio_list[1] = nopunt;
spin_lock(&bs->rescue_lock);
bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
* we retry with the original gfp_flags.
*/
- if (current->bio_list && !bio_list_empty(current->bio_list))
+ if (current->bio_list &&
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1])))
gfp_mask &= ~__GFP_DIRECT_RECLAIM;
p = mempool_alloc(bs->bio_pool, gfp_mask);
diff --git a/block/blk-core.c b/block/blk-core.c
index 1086dac8724c..d772c221cc17 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -578,8 +578,6 @@ void blk_cleanup_queue(struct request_queue *q)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
- put_disk_devt(q->disk_devt);
-
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
@@ -1975,7 +1973,14 @@ end_io:
*/
blk_qc_t generic_make_request(struct bio *bio)
{
- struct bio_list bio_list_on_stack;
+ /*
+ * bio_list_on_stack[0] contains bios submitted by the current
+ * make_request_fn.
+ * bio_list_on_stack[1] contains bios that were submitted before
+ * the current make_request_fn, but that haven't been processed
+ * yet.
+ */
+ struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;
if (!generic_make_request_checks(bio))
@@ -1992,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
* should be added at the tail
*/
if (current->bio_list) {
- bio_list_add(current->bio_list, bio);
+ bio_list_add(&current->bio_list[0], bio);
goto out;
}
@@ -2011,23 +2016,39 @@ blk_qc_t generic_make_request(struct bio *bio)
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
- bio_list_init(&bio_list_on_stack);
- current->bio_list = &bio_list_on_stack;
+ bio_list_init(&bio_list_on_stack[0]);
+ current->bio_list = bio_list_on_stack;
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
if (likely(blk_queue_enter(q, false) == 0)) {
+ struct bio_list lower, same;
+
+ /* Create a fresh bio_list for all subordinate requests */
+ bio_list_on_stack[1] = bio_list_on_stack[0];
+ bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
blk_queue_exit(q);
- bio = bio_list_pop(current->bio_list);
+ /* sort new bios into those for a lower level
+ * and those for the same level
+ */
+ bio_list_init(&lower);
+ bio_list_init(&same);
+ while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
+ if (q == bdev_get_queue(bio->bi_bdev))
+ bio_list_add(&same, bio);
+ else
+ bio_list_add(&lower, bio);
+ /* now assemble so we handle the lowest level first */
+ bio_list_merge(&bio_list_on_stack[0], &lower);
+ bio_list_merge(&bio_list_on_stack[0], &same);
+ bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
- struct bio *bio_next = bio_list_pop(current->bio_list);
-
bio_io_error(bio);
- bio = bio_next;
}
+ bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */
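
The generic_make_request() rework above keeps recursion depth at one by parking pending bios, letting the driver emit new ones into a fresh list, and then reordering: bios for a lower-level (stacked-under) queue run first, then bios for the same queue, then the parked backlog. A toy, self-contained model of that scheduling, with a trivial singly-linked list standing in for struct bio_list:

#include <stdio.h>
#include <stdlib.h>

struct bio { int level; int size; struct bio *next; };
struct blist { struct bio *head, *tail; };

static void blist_init(struct blist *l) { l->head = l->tail = NULL; }

static void blist_add(struct blist *l, struct bio *b)
{
	b->next = NULL;
	if (l->tail)
		l->tail->next = b;
	else
		l->head = b;
	l->tail = b;
}

static struct bio *blist_pop(struct blist *l)
{
	struct bio *b = l->head;

	if (b) {
		l->head = b->next;
		if (!l->head)
			l->tail = NULL;
	}
	return b;
}

static void blist_merge(struct blist *dst, struct blist *src)
{
	struct bio *b;

	while ((b = blist_pop(src)))
		blist_add(dst, b);
}

static struct bio *mkbio(int level, int size)
{
	struct bio *b = malloc(sizeof(*b));

	b->level = level;
	b->size = size;
	return b;
}

/* Toy make_request_fn: a stacked driver peels one unit off for the
 * device below it and re-queues the remainder at its own level. */
static void make_request(struct bio *b, struct blist *out)
{
	printf("level %d, size %d\n", b->level, b->size);
	if (b->level > 0) {
		blist_add(out, mkbio(b->level - 1, 1));
		if (b->size > 1)
			blist_add(out, mkbio(b->level, b->size - 1));
	}
	free(b);
}

int main(void)
{
	struct blist stack[2];	/* [0]: new bios, [1]: parked older work */
	struct bio *b;

	blist_init(&stack[0]);
	blist_add(&stack[0], mkbio(2, 2));

	while ((b = blist_pop(&stack[0]))) {
		struct blist lower, same;
		struct bio *n;
		int cur = b->level;	/* "which queue" for the sort below */

		stack[1] = stack[0];	/* park everything queued so far */
		blist_init(&stack[0]);
		make_request(b, &stack[0]);

		blist_init(&lower);
		blist_init(&same);
		while ((n = blist_pop(&stack[0])))
			blist_add(n->level == cur ? &same : &lower, n);

		/* lowest level drains first, then same level, then backlog */
		blist_merge(&stack[0], &lower);
		blist_merge(&stack[0], &same);
		blist_merge(&stack[0], &stack[1]);
	}
	return 0;
}
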
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 295e69670c39..d745ab81033a 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -17,6 +17,15 @@ static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
+static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+{
+ struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+ kobj);
+ free_cpumask_var(hctx->cpumask);
+ kfree(hctx->ctxs);
+ kfree(hctx);
+}
+
struct blk_mq_ctx_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct blk_mq_ctx *, char *);
@@ -200,7 +209,7 @@ static struct kobj_type blk_mq_ctx_ktype = {
static struct kobj_type blk_mq_hw_ktype = {
.sysfs_ops = &blk_mq_hw_sysfs_ops,
.default_attrs = default_hw_ctx_attrs,
- .release = blk_mq_sysfs_release,
+ .release = blk_mq_hw_sysfs_release,
};
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -242,24 +251,15 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
- int i, j;
+ int i;
- queue_for_each_hw_ctx(q, hctx, i) {
+ queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
- hctx_for_each_ctx(hctx, ctx, j)
- kobject_put(&ctx->kobj);
-
- kobject_put(&hctx->kobj);
- }
-
blk_mq_debugfs_unregister_hctxs(q);
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
- kobject_put(&q->mq_kobj);
-
kobject_put(&dev->kobj);
q->mq_sysfs_init_done = false;
@@ -277,7 +277,19 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
-static void blk_mq_sysfs_init(struct request_queue *q)
+void blk_mq_sysfs_deinit(struct request_queue *q)
+{
+ struct blk_mq_ctx *ctx;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ ctx = per_cpu_ptr(q->queue_ctx, cpu);
+ kobject_put(&ctx->kobj);
+ }
+ kobject_put(&q->mq_kobj);
+}
+
+void blk_mq_sysfs_init(struct request_queue *q)
{
struct blk_mq_ctx *ctx;
int cpu;
@@ -297,8 +309,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
blk_mq_disable_hotplug();
- blk_mq_sysfs_init(q);
-
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0)
goto out;
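
The sysfs fixes above move freeing of the hardware context into the kobject's ->release() callback, so the memory stays valid for as long as any sysfs reference is held and kobject_put() becomes the only way it dies. A minimal kernel-style sketch of that ownership pattern, with a hypothetical widget struct:

#include <linux/kobject.h>
#include <linux/slab.h>

struct widget {
	int value;
	struct kobject kobj;
};

/* Called only when the last reference is dropped: the one place the
 * containing object may be freed. */
static void widget_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct widget, kobj));
}

static struct kobj_type widget_ktype = {
	.release	= widget_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

static struct widget *widget_create(struct kobject *parent)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return NULL;
	if (kobject_init_and_add(&w->kobj, &widget_ktype, parent, "widget")) {
		kobject_put(&w->kobj);	/* ->release() frees w for us */
		return NULL;
	}
	return w;		/* dispose with kobject_put(&w->kobj) */
}
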
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e48bc2c72615..9d97bfc4d465 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
for (i = 0; i < set->nr_hw_queues; i++) {
struct blk_mq_tags *tags = set->tags[i];
+ if (!tags)
+ continue;
+
for (j = 0; j < tags->nr_tags; j++) {
if (!tags->static_rqs[j])
continue;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b2fd175e84d7..a4546f060e80 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+ bool may_sleep)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
@@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
}
insert:
- blk_mq_sched_insert_request(rq, false, true, true, false);
+ blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
}
/*
@@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
rcu_read_lock();
- blk_mq_try_issue_directly(old_rq, &cookie);
+ blk_mq_try_issue_directly(old_rq, &cookie, false);
rcu_read_unlock();
} else {
srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
- blk_mq_try_issue_directly(old_rq, &cookie);
+ blk_mq_try_issue_directly(old_rq, &cookie, true);
srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
}
goto done;
@@ -1955,16 +1956,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
}
}
-static void blk_mq_free_hw_queues(struct request_queue *q,
- struct blk_mq_tag_set *set)
-{
- struct blk_mq_hw_ctx *hctx;
- unsigned int i;
-
- queue_for_each_hw_ctx(q, hctx, i)
- free_cpumask_var(hctx->cpumask);
-}
-
static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
@@ -2045,7 +2036,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
struct blk_mq_hw_ctx *hctx;
- memset(__ctx, 0, sizeof(*__ctx));
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
INIT_LIST_HEAD(&__ctx->rq_list);
@@ -2257,15 +2247,19 @@ void blk_mq_release(struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx)
continue;
- kfree(hctx->ctxs);
- kfree(hctx);
+ kobject_put(&hctx->kobj);
}
q->mq_map = NULL;
kfree(q->queue_hw_ctx);
- /* ctx kobj stays in queue_ctx */
+ /*
+	 * release .mq_kobj and the sw queues' kobjects now because
+	 * they share their lifetime with the request queue.
+ */
+ blk_mq_sysfs_deinit(q);
+
free_percpu(q->queue_ctx);
}
@@ -2330,10 +2324,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
if (hctx->tags)
blk_mq_free_map_and_requests(set, j);
blk_mq_exit_hctx(q, set, hctx, j);
- free_cpumask_var(hctx->cpumask);
kobject_put(&hctx->kobj);
- kfree(hctx->ctxs);
- kfree(hctx);
hctxs[j] = NULL;
}
@@ -2352,6 +2343,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (!q->queue_ctx)
goto err_exit;
+ /* init q->mq_kobj and sw queues' kobjects */
+ blk_mq_sysfs_init(q);
+
q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
GFP_KERNEL, set->numa_node);
if (!q->queue_hw_ctx)
@@ -2442,7 +2436,6 @@ void blk_mq_free_queue(struct request_queue *q)
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
- blk_mq_free_hw_queues(q, set);
}
/* Basically redo blk_mq_init_queue with queue frozen */
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 088ced003c13..b79f9a7d8cf6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -77,6 +77,8 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
/*
* sysfs helpers
*/
+extern void blk_mq_sysfs_init(struct request_queue *q);
+extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
diff --git a/block/genhd.c b/block/genhd.c
index b26a5ea115d0..a9c516a8b37d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -572,20 +572,6 @@ exit:
disk_part_iter_exit(&piter);
}
-void put_disk_devt(struct disk_devt *disk_devt)
-{
- if (disk_devt && atomic_dec_and_test(&disk_devt->count))
- disk_devt->release(disk_devt);
-}
-EXPORT_SYMBOL(put_disk_devt);
-
-void get_disk_devt(struct disk_devt *disk_devt)
-{
- if (disk_devt)
- atomic_inc(&disk_devt->count);
-}
-EXPORT_SYMBOL(get_disk_devt);
-
/**
* device_add_disk - add partitioning information to kernel list
* @parent: parent device for the disk
@@ -626,13 +612,6 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
disk_alloc_events(disk);
- /*
- * Take a reference on the devt and assign it to queue since it
- * must not be reallocated while the bdi is registered
- */
- disk->queue->disk_devt = disk->disk_devt;
- get_disk_devt(disk->disk_devt);
-
/* Register BDI before referencing it from bdev */
bdi = disk->queue->backing_dev_info;
bdi_register_owner(bdi, disk_to_dev(disk));
@@ -681,12 +660,16 @@ void del_gendisk(struct gendisk *disk)
disk->flags &= ~GENHD_FL_UP;
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
- /*
- * Unregister bdi before releasing device numbers (as they can get
- * reused and we'd get clashes in sysfs).
- */
- bdi_unregister(disk->queue->backing_dev_info);
- blk_unregister_queue(disk);
+ if (disk->queue) {
+ /*
+ * Unregister bdi before releasing device numbers (as they can
+ * get reused and we'd get clashes in sysfs).
+ */
+ bdi_unregister(disk->queue->backing_dev_info);
+ blk_unregister_queue(disk);
+ } else {
+ WARN_ON(1);
+ }
blk_unregister_region(disk_devt(disk), disk->minors);
part_stat_set_all(&disk->part0, 0);
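The del_gendisk() hunk above tolerates a disk whose request queue was never attached instead of dereferencing a NULL pointer, while still warning loudly. A small sketch of that guard, assuming a stand-in WARN_ON():

    #include <stdio.h>

    #define WARN_ON(cond) \
        do { if (cond) fprintf(stderr, "WARN at %s:%d\n", __FILE__, __LINE__); } while (0)

    struct gendisk { void *queue; };

    static void del_gendisk_demo(struct gendisk *disk)
    {
        if (disk->queue) {
            /* unregister the bdi first, then the queue, in that order */
            printf("bdi_unregister(); blk_unregister_queue();\n");
        } else {
            WARN_ON(1);     /* half-initialized disk: keep going, but log */
        }
    }

    int main(void)
    {
        struct gendisk ok = { .queue = (void *)1 }, bad = { .queue = NULL };
        del_gendisk_demo(&ok);
        del_gendisk_demo(&bad);
        return 0;
    }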
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 1e18dca360fc..14035f826b5e 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -1023,7 +1023,6 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont)
static int gen_key(struct opal_dev *dev, void *data)
{
- const u8 *method;
u8 uid[OPAL_UID_LENGTH];
int err = 0;
@@ -1031,7 +1030,6 @@ static int gen_key(struct opal_dev *dev, void *data)
set_comid(dev, dev->comid);
memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
- method = opalmethod[OPAL_GENKEY];
kfree(dev->prev_data);
dev->prev_data = NULL;
@@ -1669,7 +1667,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
{
u8 lr_buffer[OPAL_UID_LENGTH];
- const u8 *method;
struct opal_lock_unlock *lkul = data;
u8 read_locked = 1, write_locked = 1;
int err = 0;
@@ -1677,7 +1674,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
clear_opal_cmd(dev);
set_comid(dev, dev->comid);
- method = opalmethod[OPAL_SET];
if (build_locking_range(lr_buffer, sizeof(lr_buffer),
lkul->session.opal_key.lr) < 0)
return -ERANGE;
@@ -1733,14 +1729,12 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data)
{
u8 lr_buffer[OPAL_UID_LENGTH];
u8 read_locked = 1, write_locked = 1;
- const u8 *method;
struct opal_lock_unlock *lkul = data;
int ret;
clear_opal_cmd(dev);
set_comid(dev, dev->comid);
- method = opalmethod[OPAL_SET];
if (build_locking_range(lr_buffer, sizeof(lr_buffer),
lkul->session.opal_key.lr) < 0)
return -ERANGE;
@@ -2133,7 +2127,7 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
pr_err("Locking state was not RO or RW\n");
return -EINVAL;
}
- if (lk_unlk->session.who < OPAL_USER1 &&
+ if (lk_unlk->session.who < OPAL_USER1 ||
lk_unlk->session.who > OPAL_USER9) {
pr_err("Authority was not within the range of users: %d\n",
lk_unlk->session.who);
@@ -2316,7 +2310,7 @@ static int opal_activate_user(struct opal_dev *dev,
int ret;
/* We can't activate Admin1; it's active as manufactured */
- if (opal_session->who < OPAL_USER1 &&
+ if (opal_session->who < OPAL_USER1 ||
opal_session->who > OPAL_USER9) {
pr_err("Who was not a valid user: %d\n", opal_session->who);
return -EINVAL;
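The two sed-opal hunks above fix a classic range-check bug: `who < OPAL_USER1 && who > OPAL_USER9` can never be true, so every authority value passed validation; OR rejects anything outside the range. A self-contained sketch (the OPAL_USER* values below are assumed for illustration, not taken from the header):

    #include <stdio.h>

    #define OPAL_USER1 1    /* assumed values, for illustration only */
    #define OPAL_USER9 9

    static int validate_user(int who)
    {
        /*
         * The buggy AND form is unsatisfiable, so nothing was ever
         * rejected; OR rejects anything outside [OPAL_USER1, OPAL_USER9].
         */
        if (who < OPAL_USER1 || who > OPAL_USER9)
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("who=0  -> %d (rejected)\n", validate_user(0));
        printf("who=5  -> %d (accepted)\n", validate_user(5));
        printf("who=42 -> %d (rejected)\n", validate_user(42));
        return 0;
    }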
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f5e18c2a4852..690deca17c35 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -266,7 +266,7 @@ unlock:
return err;
}
-int af_alg_accept(struct sock *sk, struct socket *newsock)
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
if (!type)
goto unlock;
- sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
+ sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
err = -ENOMEM;
if (!sk2)
goto unlock;
@@ -323,9 +323,10 @@ unlock:
}
EXPORT_SYMBOL_GPL(af_alg_accept);
-static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
- return af_alg_accept(sock->sk, newsock);
+ return af_alg_accept(sock->sk, newsock, kern);
}
static const struct proto_ops alg_proto_ops = {
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 54fc90e8339c..5e92bd275ef3 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -239,7 +239,8 @@ unlock:
return err ?: len;
}
-static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
if (err)
return err;
- err = af_alg_accept(ask->parent, newsock);
+ err = af_alg_accept(ask->parent, newsock, kern);
if (err)
return err;
@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
}
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
int err;
@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
if (err)
return err;
- return hash_accept(sock, newsock, flags);
+ return hash_accept(sock, newsock, flags, kern);
}
static struct proto_ops algif_hash_ops_nokey = {
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 4467a8089ab8..0143135b3abe 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
void __weak arch_unregister_cpu(int cpu) {}
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
- return -ENODEV;
-}
-
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
+ if (acpi_duplicate_processor_id(pr->acpi_id)) {
+ dev_err(&device->dev,
+ "Failed to get unique processor _UID (0x%x)\n",
+ pr->acpi_id);
+ return -ENODEV;
+ }
+
pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
pr->acpi_id);
if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
static int nr_unique_ids __initdata;
/* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;
/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
};
/* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
[0 ... NR_CPUS - 1] = -1,
};
@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
void **rv)
{
acpi_status status;
+ acpi_object_type acpi_type;
+ unsigned long long uid;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ status = acpi_get_type(handle, &acpi_type);
if (ACPI_FAILURE(status))
- acpi_handle_info(handle, "Not get the processor object\n");
- else
- processor_validated_ids_update(object.processor.proc_id);
+ return false;
+
+ switch (acpi_type) {
+ case ACPI_TYPE_PROCESSOR:
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ goto err;
+ uid = object.processor.proc_id;
+ break;
+
+ case ACPI_TYPE_DEVICE:
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+ if (ACPI_FAILURE(status))
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ processor_validated_ids_update(uid);
+ return true;
+
+err:
+ acpi_handle_info(handle, "Invalid processor object\n");
+ return false;
- return AE_OK;
}
-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
{
- /* Search all processor nodes in ACPI namespace */
+ /* Check all processors in the ACPI namespace for duplicate IDs */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
acpi_processor_ids_walk,
NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+ NULL, NULL);
}
-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
{
int i;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 80cb5eb75b63..34fbe027e73a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
acpi_wakeup_device_init();
acpi_debugger_init();
acpi_setup_sb_notify_handler();
- acpi_set_processor_mapping();
return 0;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 219b90bc0922..f15900132912 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+void pci_ioapic_remove(struct acpi_pci_root *root);
int acpi_ioapic_remove(struct acpi_pci_root *root);
#else
+static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; }
static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
#endif
#ifdef CONFIG_ACPI_DOCK
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 6d7ce6e12aaa..1120dfd625b8 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -206,24 +206,34 @@ int acpi_ioapic_add(acpi_handle root_handle)
return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
}
-int acpi_ioapic_remove(struct acpi_pci_root *root)
+void pci_ioapic_remove(struct acpi_pci_root *root)
{
- int retval = 0;
struct acpi_pci_ioapic *ioapic, *tmp;
mutex_lock(&ioapic_list_lock);
list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
if (root->device->handle != ioapic->root_handle)
continue;
-
- if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
- retval = -EBUSY;
-
if (ioapic->pdev) {
pci_release_region(ioapic->pdev, 0);
pci_disable_device(ioapic->pdev);
pci_dev_put(ioapic->pdev);
}
+ }
+ mutex_unlock(&ioapic_list_lock);
+}
+
+int acpi_ioapic_remove(struct acpi_pci_root *root)
+{
+ int retval = 0;
+ struct acpi_pci_ioapic *ioapic, *tmp;
+
+ mutex_lock(&ioapic_list_lock);
+ list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
+ if (root->device->handle != ioapic->root_handle)
+ continue;
+ if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
+ retval = -EBUSY;
if (ioapic->res.flags && ioapic->res.parent)
release_resource(&ioapic->res);
list_del(&ioapic->list);
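Splitting acpi_ioapic_remove() above lets the PCI half of the teardown run before pci_remove_root_bus() and the controller unregistration run after it. A compact sketch of that two-phase ordering, with illustrative names rather than the kernel API:

    #include <stdio.h>

    struct ioapic { int id; };

    static void pci_ioapic_remove_demo(struct ioapic *v, int n)
    {
        for (int i = 0; i < n; i++)
            printf("ioapic %d: release BAR, disable PCI function\n", v[i].id);
    }

    static void acpi_ioapic_remove_demo(struct ioapic *v, int n)
    {
        for (int i = 0; i < n; i++)
            printf("ioapic %d: unregister controller, free resources\n", v[i].id);
    }

    int main(void)
    {
        struct ioapic apics[] = { { 0 }, { 1 } };

        pci_ioapic_remove_demo(apics, 2);   /* before the bus disappears */
        printf("pci_remove_root_bus()\n");
        acpi_ioapic_remove_demo(apics, 2);  /* after, matching the new order */
        return 0;
    }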
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index bf601d4df8cf..919be0aa2578 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_stop_root_bus(root->bus);
- WARN_ON(acpi_ioapic_remove(root));
-
+ pci_ioapic_remove(root);
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
pci_remove_root_bus(root->bus);
+ WARN_ON(acpi_ioapic_remove(root));
dmar_device_remove(device->handle);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 611a5585a902..b933061b6b60 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
}
static int map_lapic_id(struct acpi_subtable_header *entry,
- u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+ u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_apic *lapic =
container_of(entry, struct acpi_madt_local_apic, header);
- if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_x2apic *apic =
container_of(entry, struct acpi_madt_local_x2apic, header);
- if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
container_of(entry, struct acpi_madt_local_sapic, header);
- if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
* Retrieve the ARM CPU physical identifier (MPIDR)
*/
static int map_gicc_mpidr(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
{
struct acpi_madt_generic_interrupt *gicc =
container_of(entry, struct acpi_madt_generic_interrupt, header);
- if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
return -ENODEV;
/* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
}
static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
- int type, u32 acpi_id, bool ignore_disabled)
+ int type, u32 acpi_id)
{
unsigned long madt_end, entry;
phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (!map_lapic_id(header, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (!map_x2apic_id(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (!map_lsapic_id(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
- if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
if (!madt)
return PHYS_CPUID_INVALID;
- rv = map_madt_entry(madt, 1, acpi_id, true);
+ rv = map_madt_entry(madt, 1, acpi_id);
acpi_put_table((struct acpi_table_header *)madt);
return rv;
}
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
- bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
- map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+ map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
- map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+ map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
- map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+ map_x2apic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
- map_gicc_mpidr(header, type, acpi_id, &phys_id,
- ignore_disabled);
+ map_gicc_mpidr(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return phys_id;
}
-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
- u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
phys_cpuid_t phys_id;
- phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+ phys_id = map_mat_entry(handle, type, acpi_id);
if (invalid_phys_cpuid(phys_id))
- phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
- ignore_disabled);
+ phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
return phys_id;
}
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
- return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
- int type, id;
- u32 acpi_id;
- acpi_status status;
- acpi_object_type acpi_type;
- unsigned long long tmp;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
- status = acpi_get_type(handle, &acpi_type);
- if (ACPI_FAILURE(status))
- return false;
-
- switch (acpi_type) {
- case ACPI_TYPE_PROCESSOR:
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = object.processor.proc_id;
-
- /* validate the acpi_id */
- if(acpi_processor_validate_proc_id(acpi_id))
- return false;
- break;
- case ACPI_TYPE_DEVICE:
- status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = tmp;
- break;
- default:
- return false;
- }
-
- type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
- *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
- id = acpi_map_cpuid(*phys_id, acpi_id);
-
- if (id < 0)
- return false;
- *cpuid = id;
- return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
- void **rv)
-{
- phys_cpuid_t phys_id;
- int cpu_id;
-
- if (!map_processor(handle, &phys_id, &cpu_id))
- return AE_ERROR;
-
- acpi_map_cpu2node(handle, cpu_id, phys_id);
- return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
- /* Set persistent cpu <-> node mapping for all processors. */
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, set_processor_node_mapping,
- NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
u64 *phys_addr, int *ioapic_id)
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 85d833289f28..4c96f3ac4976 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS1043A:
if (!qpriv->ecc_addr)
return -EINVAL;
- writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS1046A:
if (!qpriv->ecc_addr)
return -EINVAL;
- writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
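The ahci_qoriq hunks above replace a blind register write with a read-modify-write so that setting ECC_DIS_ARMV8_CH2 no longer clobbers whatever else firmware left in the register. A sketch of the difference, with an assumed bit position and stand-ins for readl()/writel():

    #include <stdint.h>
    #include <stdio.h>

    #define ECC_DIS_CH2 (1u << 5)           /* assumed bit position */

    static uint32_t reg;                    /* stands in for the ECC MMIO register */

    static uint32_t mmio_read(void)         { return reg; }
    static void     mmio_write(uint32_t v)  { reg = v; }

    int main(void)
    {
        reg = 0x3;                              /* bits set by firmware      */

        /* Buggy form: mmio_write(ECC_DIS_CH2); would wipe bits 0-1.         */
        mmio_write(mmio_read() | ECC_DIS_CH2);  /* fixed: preserve them      */

        printf("reg = 0x%x\n", reg);            /* 0x23: old bits intact     */
        return 0;
    }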
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2bd92dca3e62..274d6d7193d7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
break;
default:
- WARN_ON_ONCE(1);
return AC_ERR_SYSTEM;
}
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 46698232e6bf..19e6e539a061 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
static void ata_tport_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
device_initialize(dev);
dev->type = &ata_port_type;
- dev->parent = get_device(parent);
+ dev->parent = parent;
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
static void ata_tlink_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
int error;
device_initialize(dev);
- dev->parent = get_device(&ap->tdev);
+ dev->parent = &ap->tdev;
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
static void ata_tdev_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
int error;
device_initialize(dev);
- dev->parent = get_device(&link->tdev);
+ dev->parent = &link->tdev;
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 684bda4d14a1..6bb60fb6a30b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
return restart_syscall();
}
-void assert_held_device_hotplug(void)
-{
- lockdep_assert_held(&device_hotplug_lock);
-}
-
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 10aed84244f5..939641d6e262 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -50,7 +50,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (46) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 644ba0888bd4..9cfd2e06a649 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -61,7 +61,7 @@
first drive found.
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (45) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ed93e8badf56..14c5d32f5d8b 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -59,7 +59,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (47) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 5db955fe3a94..3b5882bfb736 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -84,7 +84,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (97) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 61fc6824299a..e815312a00ad 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -61,7 +61,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (96) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4d6807723798..517838b65964 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -120,10 +120,11 @@ static int atomic_dec_return_safe(atomic_t *v)
/* Feature bits */
-#define RBD_FEATURE_LAYERING (1<<0)
-#define RBD_FEATURE_STRIPINGV2 (1<<1)
-#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2)
-#define RBD_FEATURE_DATA_POOL (1<<7)
+#define RBD_FEATURE_LAYERING (1ULL<<0)
+#define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
+#define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
+#define RBD_FEATURE_DATA_POOL (1ULL<<7)
+
#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
RBD_FEATURE_STRIPINGV2 | \
RBD_FEATURE_EXCLUSIVE_LOCK | \
@@ -499,16 +500,23 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
return is_lock_owner;
}
+static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
+}
+
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
+static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);
static struct attribute *rbd_bus_attrs[] = {
&bus_attr_add.attr,
&bus_attr_remove.attr,
&bus_attr_add_single_major.attr,
&bus_attr_remove_single_major.attr,
+ &bus_attr_supported_features.attr,
NULL,
};
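Switching the RBD feature bits to 1ULL above keeps every constant the same 64-bit unsigned type as the feature mask they are compared against, and leaves room for bit positions of 31 and beyond, where a plain int shift would be undefined. A sketch of the distinction:

    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_OLD(n) (1 << (n))     /* int: undefined for n >= 31 */
    #define FEATURE_NEW(n) (1ULL << (n))  /* unsigned 64-bit: safe      */

    int main(void)
    {
        /* bit 33 is only representable with the 1ULL form */
        uint64_t mask = FEATURE_NEW(7) | FEATURE_NEW(33);
        printf("mask = 0x%llx\n", (unsigned long long)mask);
        return 0;
    }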
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e27d89a36c34..dceb5edd1e54 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1189,6 +1189,8 @@ static int zram_add(void)
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+ zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
+ zram->disk->queue->limits.chunk_sectors = 0;
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
/*
* zram_bio_discard() will clear all logical blocks if logical block
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 3ad86fdf954e..b1ad12552b56 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
irq, err);
return err;
}
- omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
- priv->clk = of_clk_get(pdev->dev.of_node, 0);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
dev_err(&pdev->dev, "unable to enable the clk, "
"err = %d\n", err);
}
+
+ /*
+ * On OMAP4, enabling the shutdown_oflo interrupt is
+ * done in the interrupt mask register. There is no
+ * such register on EIP76, and it's enabled by the
+ * same bit in the control register
+ */
+ if (priv->pdata->regs[RNG_INTMASK_REG])
+ omap_rng_write(priv, RNG_INTMASK_REG,
+ RNG_SHUTDOWN_OFLO_MASK);
+ else
+ omap_rng_write(priv, RNG_CONTROL_REG,
+ RNG_SHUTDOWN_OFLO_MASK);
}
return 0;
}
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index a5b1eb276c0b..e6d0d271c58c 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timer.h>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1ef26403bcc8..0ab024918907 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -313,13 +313,6 @@ static int random_read_wakeup_bits = 64;
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
/*
- * The minimum number of seconds between urandom pool reseeding. We
- * do this to limit the amount of entropy that can be drained from the
- * input pool even if there are heavy demands on /dev/urandom.
- */
-static int random_min_urandom_seed = 60;
-
-/*
* Originally, we used a primitive polynomial of degree .poolwords
* over GF(2). The taps for various sizes are defined below. They
* were chosen to be evenly spaced except for the last tap, which is 1
@@ -409,7 +402,6 @@ static struct poolinfo {
*/
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -467,7 +459,6 @@ struct entropy_store {
int entropy_count;
int entropy_total;
unsigned int initialized:1;
- unsigned int limit:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
- .limit = 1,
.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
.pool = input_pool_data
};
@@ -493,7 +483,6 @@ static struct entropy_store input_pool = {
static struct entropy_store blocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "blocking",
- .limit = 1,
.pull = &input_pool,
.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
.pool = blocking_pool_data,
@@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
spin_unlock_irqrestore(&primary_crng.lock, flags);
}
-static inline void maybe_reseed_primary_crng(void)
-{
- if (crng_init > 2 &&
- time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL))
- crng_reseed(&primary_crng, &input_pool);
-}
-
static inline void crng_wait_ready(void)
{
wait_event_interruptible(crng_init_wait, crng_ready());
@@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
r->entropy_count > r->poolinfo->poolfracbits)
return;
- if (r->limit == 0 && random_min_urandom_seed) {
- unsigned long now = jiffies;
-
- if (time_before(now,
- r->last_pulled + random_min_urandom_seed * HZ))
- return;
- r->last_pulled = now;
- }
-
_xfer_secondary_pool(r, nbytes);
}
@@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
__u32 tmp[OUTPUT_POOL_WORDS];
- /* For /dev/random's pool, always leave two wakeups' worth */
- int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
int bytes = nbytes;
/* pull at least as much as a wakeup */
@@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
bytes = extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_bits / 8, rsvd_bytes);
+ random_read_wakeup_bits / 8, 0);
mix_pool_bytes(r, tmp, bytes);
credit_entropy_bits(r, bytes*8);
}
@@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work)
static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
- int entropy_count, orig;
+ int entropy_count, orig, have_bytes;
size_t ibytes, nfrac;
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
@@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
ibytes = nbytes;
- /* If limited, never pull more than available */
- if (r->limit) {
- int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+ /* never pull more than available */
+ have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- }
+ if ((have_bytes -= reserved) < 0)
+ have_bytes = 0;
+ ibytes = min_t(size_t, ibytes, have_bytes);
if (ibytes < min)
ibytes = 0;
@@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
/*
@@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = {
};
#endif /* CONFIG_SYSCTL */
-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-
-int random_int_secret_init(void)
-{
- get_random_bytes(random_int_secret, sizeof(random_int_secret));
- return 0;
-}
-
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
- __aligned(sizeof(unsigned long));
+struct batched_entropy {
+ union {
+ u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
+ u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+ };
+ unsigned int position;
+};
/*
- * Get a random word for internal kernel use only. Similar to urandom but
- * with the goal of minimal entropy pool depletion. As a result, the random
- * value is not cryptographically secure but for several uses the cost of
- * depleting entropy is too high
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.
*/
-unsigned int get_random_int(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+u64 get_random_u64(void)
{
- __u32 *hash;
- unsigned int ret;
+ u64 ret;
+ struct batched_entropy *batch;
- if (arch_get_random_int(&ret))
+#if BITS_PER_LONG == 64
+ if (arch_get_random_long((unsigned long *)&ret))
return ret;
+#else
+ if (arch_get_random_long((unsigned long *)&ret) &&
+ arch_get_random_long((unsigned long *)&ret + 1))
+ return ret;
+#endif
- hash = get_cpu_var(get_random_int_hash);
-
- hash[0] += current->pid + jiffies + random_get_entropy();
- md5_transform(hash, random_int_secret);
- ret = hash[0];
- put_cpu_var(get_random_int_hash);
-
+ batch = &get_cpu_var(batched_entropy_u64);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
+ put_cpu_var(batched_entropy_u64);
return ret;
}
-EXPORT_SYMBOL(get_random_int);
+EXPORT_SYMBOL(get_random_u64);
-/*
- * Same as get_random_int(), but returns unsigned long.
- */
-unsigned long get_random_long(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+u32 get_random_u32(void)
{
- __u32 *hash;
- unsigned long ret;
+ u32 ret;
+ struct batched_entropy *batch;
- if (arch_get_random_long(&ret))
+ if (arch_get_random_int(&ret))
return ret;
- hash = get_cpu_var(get_random_int_hash);
-
- hash[0] += current->pid + jiffies + random_get_entropy();
- md5_transform(hash, random_int_secret);
- ret = *(unsigned long *)hash;
- put_cpu_var(get_random_int_hash);
-
+ batch = &get_cpu_var(batched_entropy_u32);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
+ put_cpu_var(batched_entropy_u32);
return ret;
}
-EXPORT_SYMBOL(get_random_long);
+EXPORT_SYMBOL(get_random_u32);
/**
* randomize_page - Generate a random, page aligned address
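The batched-entropy rework above amortizes one ChaCha20 block extraction across several get_random_u64()/get_random_u32() calls via a per-CPU buffer. A userspace sketch of the same refill-and-index idea; refill_block() below is a deliberately non-random stand-in for extract_crng():

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE 64                    /* one ChaCha20 block */

    struct batched_entropy {
        uint64_t entropy_u64[BLOCK_SIZE / sizeof(uint64_t)];
        unsigned int position;
    };

    static void refill_block(uint8_t *buf)
    {
        static uint64_t counter;             /* NOT random: demo only */
        for (int i = 0; i < BLOCK_SIZE; i++)
            buf[i] = (uint8_t)(counter++ * 2654435761u >> 24);
    }

    static uint64_t get_random_u64_demo(struct batched_entropy *batch)
    {
        /* refill only when the batch is exhausted, then hand out slots */
        if (batch->position % (BLOCK_SIZE / sizeof(uint64_t)) == 0) {
            refill_block((uint8_t *)batch->entropy_u64);
            batch->position = 0;
        }
        return batch->entropy_u64[batch->position++];
    }

    int main(void)
    {
        struct batched_entropy b = { .position = 0 };
        for (int i = 0; i < 3; i++)
            printf("0x%016llx\n",
                   (unsigned long long)get_random_u64_demo(&b));
        return 0;
    }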
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 745844ee973e..d4ca9962a759 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>
/*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
return (upper << 16) | lower;
}
-static u32 tc_get_cv32(void)
-{
- return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
static u64 tc_get_cycles32(struct clocksource *cs)
{
- return tc_get_cv32();
+ return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}
static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u64 notrace tc_read_sched_clock(void)
-{
- return tc_get_cv32();
-}
-
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
clksrc.read = tc_get_cycles32;
/* setup ony channel 0 */
tcb_setup_single_chan(tc, best_divisor_idx);
-
- /* register sched_clock on chips with single 32 bit counter */
- sched_clock_register(tc_read_sched_clock, 32, divided_rate);
} else {
/* tclib will give us three clocks no matter what the
* underlying platform supports.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a47543281864..b8ff617d449d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}
/**
@@ -2532,4 +2534,5 @@ static int __init cpufreq_core_init(void)
return 0;
}
+module_param(off, int, 0444);
core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b1fbaa30ae04..08e134ffba68 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
+static inline int32_t percent_ext_fp(int percent)
+{
+ return div_ext_fp(percent, 100);
+}
+
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -377,6 +382,7 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
intel_pstate_init_limits(limits);
limits->min_perf_pct = 100;
limits->min_perf = int_ext_tofp(1);
+ limits->min_sysfs_pct = 100;
}
static DEFINE_MUTEX(intel_pstate_driver_lock);
@@ -844,12 +850,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
- int min, hw_min, max, hw_max, cpu, range, adj_range;
+ int min, hw_min, max, hw_max, cpu;
struct perf_limits *perf_limits = limits;
u64 value, cap;
for_each_cpu(cpu, policy->cpus) {
- int max_perf_pct, min_perf_pct;
struct cpudata *cpu_data = all_cpu_data[cpu];
s16 epp;
@@ -862,20 +867,15 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
hw_max = HWP_GUARANTEED_PERF(cap);
else
hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
- max_perf_pct = perf_limits->max_perf_pct;
- min_perf_pct = perf_limits->min_perf_pct;
+ min = fp_ext_toint(hw_max * perf_limits->min_perf);
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- adj_range = min_perf_pct * range / 100;
- min = hw_min + adj_range;
+
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
- adj_range = max_perf_pct * range / 100;
- max = hw_min + adj_range;
-
+ max = fp_ext_toint(hw_max * perf_limits->max_perf);
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
@@ -968,17 +968,27 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
}
static void intel_pstate_update_policies(void)
+ __releases(&intel_pstate_limits_lock)
+ __acquires(&intel_pstate_limits_lock)
{
+ struct perf_limits *saved_limits = limits;
int cpu;
+ mutex_unlock(&intel_pstate_limits_lock);
+
for_each_possible_cpu(cpu)
cpufreq_update_policy(cpu);
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ limits = saved_limits;
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
*(u32 *)data = val;
+ pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
intel_pstate_reset_all_pid();
return 0;
}
@@ -1180,10 +1190,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
- mutex_unlock(&intel_pstate_limits_lock);
-
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1215,12 +1225,12 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf_pct = max(limits->min_perf_pct,
limits->max_perf_pct);
- limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
-
- mutex_unlock(&intel_pstate_limits_lock);
+ limits->max_perf = percent_ext_fp(limits->max_perf_pct);
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1252,12 +1262,12 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf_pct = min(limits->max_perf_pct,
limits->min_perf_pct);
- limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-
- mutex_unlock(&intel_pstate_limits_lock);
+ limits->min_perf = percent_ext_fp(limits->min_perf_pct);
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1874,13 +1884,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
- trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
return pstate;
}
static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
- pstate = intel_pstate_prepare_request(cpu, pstate);
if (pstate == cpu->pstate.current_pstate)
return;
@@ -1900,6 +1908,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
update_turbo_state();
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
@@ -2070,36 +2080,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
struct perf_limits *limits)
{
+ int32_t max_policy_perf, min_policy_perf;
- limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
- policy->cpuinfo.max_freq);
- limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+ max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+ max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
if (policy->max == policy->min) {
- limits->min_policy_pct = limits->max_policy_pct;
+ min_policy_perf = max_policy_perf;
} else {
- limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
- policy->cpuinfo.max_freq);
- limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
- 0, 100);
+ min_policy_perf = div_ext_fp(policy->min,
+ policy->cpuinfo.max_freq);
+ min_policy_perf = clamp_t(int32_t, min_policy_perf,
+ 0, max_policy_perf);
}
- /* Normalize user input to [min_policy_pct, max_policy_pct] */
- limits->min_perf_pct = max(limits->min_policy_pct,
- limits->min_sysfs_pct);
- limits->min_perf_pct = min(limits->max_policy_pct,
- limits->min_perf_pct);
- limits->max_perf_pct = min(limits->max_policy_pct,
- limits->max_sysfs_pct);
- limits->max_perf_pct = max(limits->min_policy_pct,
- limits->max_perf_pct);
+ /* Normalize user input to [min_perf, max_perf] */
+ limits->min_perf = max(min_policy_perf,
+ percent_ext_fp(limits->min_sysfs_pct));
+ limits->min_perf = min(limits->min_perf, max_policy_perf);
+ limits->max_perf = min(max_policy_perf,
+ percent_ext_fp(limits->max_sysfs_pct));
+ limits->max_perf = max(min_policy_perf, limits->max_perf);
- /* Make sure min_perf_pct <= max_perf_pct */
- limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+ /* Make sure min_perf <= max_perf */
+ limits->min_perf = min(limits->min_perf, limits->max_perf);
- limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
- limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+ limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+ limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
limits->max_perf_pct, limits->min_perf_pct);
@@ -2132,16 +2140,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock);
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("set performance\n");
if (!perf_limits) {
limits = &performance_limits;
perf_limits = limits;
}
- if (policy->max >= policy->cpuinfo.max_freq &&
- !limits->no_turbo) {
- pr_debug("set performance\n");
- intel_pstate_set_performance_limits(perf_limits);
- goto out;
- }
} else {
pr_debug("set powersave\n");
if (!perf_limits) {
@@ -2152,7 +2155,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
}
intel_pstate_update_perf_limits(policy, perf_limits);
- out:
+
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
* NOHZ_FULL CPUs need this as the governor callback may not
@@ -2198,9 +2201,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
unsigned int max_freq, min_freq;
max_freq = policy->cpuinfo.max_freq *
- limits->max_sysfs_pct / 100;
+ perf_limits->max_sysfs_pct / 100;
min_freq = policy->cpuinfo.max_freq *
- limits->min_sysfs_pct / 100;
+ perf_limits->min_sysfs_pct / 100;
cpufreq_verify_within_limits(policy, min_freq, max_freq);
}
@@ -2243,13 +2246,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- /*
- * We need sane value in the cpu->perf_limits, so inherit from global
- * perf_limits limits, which are seeded with values based on the
- * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
- */
if (per_cpu_limits)
- memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
+ intel_pstate_init_limits(cpu->perf_limits);
policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2301,7 +2299,6 @@ static struct cpufreq_driver intel_pstate = {
static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- struct perf_limits *perf_limits = limits;
update_turbo_state();
policy->cpuinfo.max_freq = limits->turbo_disabled ?
@@ -2309,15 +2306,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_cpu_limits(policy);
- if (per_cpu_limits)
- perf_limits = cpu->perf_limits;
-
- mutex_lock(&intel_pstate_limits_lock);
-
- intel_pstate_update_perf_limits(policy, perf_limits);
-
- mutex_unlock(&intel_pstate_limits_lock);
-
return 0;
}
@@ -2370,6 +2358,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
+ freqs.new = target_pstate * cpu->pstate.scaling;
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
@@ -2383,8 +2372,9 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
intel_pstate_update_pstate(cpu, target_pstate);
- return target_freq;
+ return target_pstate * cpu->pstate.scaling;
}
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -2437,8 +2427,11 @@ static int intel_pstate_register_driver(void)
intel_pstate_init_limits(&powersave_limits);
intel_pstate_set_performance_limits(&performance_limits);
- limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
- &performance_limits : &powersave_limits;
+ if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
+ intel_pstate_driver == &intel_pstate)
+ limits = &performance_limits;
+ else
+ limits = &powersave_limits;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
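percent_ext_fp() introduced above converts a percentage straight into the driver's extended fixed-point format, replacing the old percent-based arithmetic scattered through the limit handling. A sketch of that conversion and its inverse; the 14-bit fractional width below is an assumption for illustration, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define EXT_FRAC_BITS 14                 /* assumed fractional width */

    static int32_t percent_ext_fp(int percent)
    {
        /* percent / 100 in fixed point: shift first to keep precision */
        return (int32_t)(((int64_t)percent << EXT_FRAC_BITS) / 100);
    }

    static int fp_ext_toint(int32_t fp)
    {
        return fp >> EXT_FRAC_BITS;
    }

    int main(void)
    {
        int32_t min = percent_ext_fp(25);            /* 25% as fixed point */
        printf("25%% -> %d (raw), back to %d%%\n",
               min, fp_ext_toint(min * 100));
        return 0;
    }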
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index dce1af0ce85c..1b9da3dc799b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
scatterwalk_done(&walk, out, 0);
}
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
{
if (dev->sg_dst_cpy) {
dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
- /* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock held. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
dev->req->base.complete(&dev->req->base, err);
dev->busy = false;
}
@@ -368,51 +371,44 @@ exit:
}
/*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ * - 0 if there is no more data,
+ * - 1 if new transmitting (output) data is ready and its address+length
+ * have to be written to device (by calling s5p_set_dma_outdata()).
*/
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
- int err = 0;
- bool ret = false;
+ int ret = 0;
s5p_unset_outdata(dev);
if (!sg_is_last(dev->sg_dst)) {
- err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
- if (err)
- s5p_aes_complete(dev, err);
- else
- ret = true;
- } else {
- s5p_aes_complete(dev, err);
-
- dev->busy = true;
- tasklet_schedule(&dev->tasklet);
+ ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+ if (!ret)
+ ret = 1;
}
return ret;
}
/*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ * - 0 if there is no more data,
+ * - 1 if new receiving (input) data is ready and its address+length
+ * have to be written to device (by calling s5p_set_dma_indata()).
*/
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev)
{
- int err;
- bool ret = false;
+ int ret = 0;
s5p_unset_indata(dev);
if (!sg_is_last(dev->sg_src)) {
- err = s5p_set_indata(dev, sg_next(dev->sg_src));
- if (err)
- s5p_aes_complete(dev, err);
- else
- ret = true;
+ ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+ if (!ret)
+ ret = 1;
}
return ret;
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- bool set_dma_tx = false;
- bool set_dma_rx = false;
+ int err_dma_tx = 0;
+ int err_dma_rx = 0;
+ bool tx_end = false;
unsigned long flags;
uint32_t status;
+ int err;
spin_lock_irqsave(&dev->lock, flags);
+ /*
+ * Handle an rx or tx interrupt. If there is still data (the
+ * scatterlist has not reached its end), map the next scatterlist
+ * entry. If that mapping fails, s5p_aes_complete() should be called.
+ *
+ * If there is no more data in the tx scatterlist, call
+ * s5p_aes_complete() and schedule a new tasklet.
+ */
status = SSS_READ(dev, FCINTSTAT);
if (status & SSS_FCINTSTAT_BRDMAINT)
- set_dma_rx = s5p_aes_rx(dev);
- if (status & SSS_FCINTSTAT_BTDMAINT)
- set_dma_tx = s5p_aes_tx(dev);
+ err_dma_rx = s5p_aes_rx(dev);
+
+ if (status & SSS_FCINTSTAT_BTDMAINT) {
+ if (sg_is_last(dev->sg_dst))
+ tx_end = true;
+ err_dma_tx = s5p_aes_tx(dev);
+ }
SSS_WRITE(dev, FCINTPEND, status);
- /*
- * Writing length of DMA block (either receiving or transmitting)
- * will start the operation immediately, so this should be done
- * at the end (even after clearing pending interrupts to not miss the
- * interrupt).
- */
- if (set_dma_tx)
- s5p_set_dma_outdata(dev, dev->sg_dst);
- if (set_dma_rx)
- s5p_set_dma_indata(dev, dev->sg_src);
+ if (err_dma_rx < 0) {
+ err = err_dma_rx;
+ goto error;
+ }
+ if (err_dma_tx < 0) {
+ err = err_dma_tx;
+ goto error;
+ }
+
+ if (tx_end) {
+ s5p_sg_done(dev);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ s5p_aes_complete(dev, 0);
+ dev->busy = true;
+ tasklet_schedule(&dev->tasklet);
+ } else {
+ /*
+ * Writing length of DMA block (either receiving or
+ * transmitting) will start the operation immediately, so this
+ * should be done at the end (even after clearing pending
+ * interrupts to not miss the interrupt).
+ */
+ if (err_dma_tx == 1)
+ s5p_set_dma_outdata(dev, dev->sg_dst);
+ if (err_dma_rx == 1)
+ s5p_set_dma_indata(dev, dev->sg_src);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+
+error:
+ s5p_sg_done(dev);
spin_unlock_irqrestore(&dev->lock, flags);
+ s5p_aes_complete(dev, err);
return IRQ_HANDLED;
}
@@ -597,8 +633,9 @@ outdata_error:
s5p_unset_indata(dev);
indata_error:
- s5p_aes_complete(dev, err);
+ s5p_sg_done(dev);
spin_unlock_irqrestore(&dev->lock, flags);
+ s5p_aes_complete(dev, err);
}
static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
dev_warn(dev, "feed control interrupt is not available.\n");
goto err_irq;
}
- err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
- IRQF_SHARED, pdev->name, pdev);
+ err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+ s5p_aes_interrupt, IRQF_ONESHOT,
+ pdev->name, pdev);
if (err < 0) {
dev_warn(dev, "feed control interrupt is not available.\n");
goto err_irq;
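The s5p-sss changes above consistently drop dev->lock before invoking the request completion, since the callback may re-enter the driver and try to take the same lock. A minimal pthread sketch of that ordering rule:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void completion_cb(int err)
    {
        /* In the driver this may resubmit work and re-acquire `lock`. */
        printf("request done, err=%d\n", err);
    }

    static void finish_request(int err)
    {
        pthread_mutex_lock(&lock);
        /* ... tear down scatterlist copies (s5p_sg_done() above) ... */
        pthread_mutex_unlock(&lock);

        completion_cb(err);     /* called with the lock dropped */
    }

    int main(void)
    {
        finish_request(0);
        return 0;
    }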
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 43a0c8a26ab0..00a16ab601cb 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data,
void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
{
/*
- * We always need to disble the hardware before trying to flush the
+ * We always need to disable the hardware before trying to flush the
* FIFO. This is something that isn't written in the design
* specification, but we have been informed by the hardware designers
* that this must be done.
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 8d9829ff2a78..80c6db279ae1 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
int rc = VM_FAULT_SIGBUS;
phys_addr_t phys;
pfn_t pfn;
+ unsigned int fault_size = PAGE_SIZE;
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size != dax_region->align)
+ return VM_FAULT_SIGBUS;
+
phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
vmf->pgoff);
return VM_FAULT_SIGBUS;
}
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
+ unsigned int fault_size = PMD_SIZE;
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pmd_addr < vmf->vma->vm_start ||
+ (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
pgoff = linear_page_index(vmf->vma, pmd_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
pgoff);
return VM_FAULT_SIGBUS;
}
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
+ unsigned int fault_size = PUD_SIZE;
+
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pud_addr < vmf->vma->vm_start ||
+ (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
pgoff = linear_page_index(vmf->vma, pud_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
pgoff);
return VM_FAULT_SIGBUS;
}
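
The three dax fault handlers above now share one policy: the fault granularity must match the region alignment. A PTE fault is already the smallest unit, so any mismatch is fatal; PMD and PUD faults can fall back, letting the core retry with a smaller mapping. A condensed sketch of that decision (the helper name is illustrative; the VM_FAULT_* values are the ones used in the patch):

#include <linux/mm.h>

static int dax_check_fault_size(unsigned long fault_size,
				unsigned long region_align)
{
	if (fault_size < region_align)
		return VM_FAULT_SIGBUS;		/* cannot satisfy the fault */
	if (fault_size > region_align)
		return VM_FAULT_FALLBACK;	/* retry with a smaller size */
	return 0;				/* exact match: proceed */
}
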
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 349dc3e1e52e..974c5a31a005 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void)
bool systab_found;
efi_mm.pgd = pgd_alloc(&efi_mm);
+ mm_init_cpumask(&efi_mm);
init_new_context(NULL, &efi_mm);
systab_found = false;
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 6def402bf569..5da36e56b36a 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
size = sizeof(secboot);
status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
NULL, &size, &secboot);
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
if (status != EFI_SUCCESS)
goto out_efi_err;
@@ -78,7 +80,5 @@ secure_boot_enabled:
out_efi_err:
pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
- if (status == EFI_NOT_FOUND)
- return efi_secureboot_mode_disabled;
return efi_secureboot_mode_unknown;
}
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb57915b..8a08e81ee90d 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
# of AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d2d0f60ff36d..99424cb8020b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@ free_partial_kdata:
for (; i >= 0; i--)
drm_free_large(p->chunks[i].kdata);
kfree(p->chunks);
+ p->chunks = NULL;
+ p->nchunks = 0;
put_ctx:
amdgpu_ctx_put(p->ctx);
free_chunk:
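
Resetting p->chunks and p->nchunks after the partial-free loop matters because later cleanup can revisit the same parser state; without the reset it would free the chunk array a second time or iterate over freed memory. The free-and-reset idiom, restated with illustrative names:

#include <linux/slab.h>

struct parser {
	void **chunks;
	unsigned int nchunks;
};

static void parser_drop_chunks(struct parser *p)
{
	kfree(p->chunks);
	p->chunks = NULL;	/* kfree(NULL) is a no-op on a second pass */
	p->nchunks = 0;		/* later loops over nchunks see nothing */
}
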
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6abb238b25c9..a3a105ec99e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
r = amdgpu_late_init(adev);
- if (r)
+ if (r) {
+ if (fbcon)
+ console_unlock();
return r;
+ }
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2587,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
use_bank = 0;
}
- *pos &= 0x3FFFF;
+ *pos &= (1UL << 22) - 1;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2663,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
use_bank = 0;
}
- *pos &= 0x3FFFF;
+ *pos &= (1UL << 22) - 1;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
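
The debugfs mask change is plain bit arithmetic: the old constant 0x3FFFF is 2^18 - 1, limiting register offsets to a 256 KiB window, while (1UL << 22) - 1 equals 0x3FFFFF and admits offsets up to 4 MiB. A compile-time restatement of both identities, as a sketch to be dropped into any function:

#include <linux/bug.h>

static inline void pos_mask_sanity(void)
{
	BUILD_BUG_ON(0x3FFFF != (1UL << 18) - 1);	/* old: 256 KiB window */
	BUILD_BUG_ON(((1UL << 22) - 1) != 0x3FFFFF);	/* new: 4 MiB window */
}
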
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 75fc376ba735..f7adbace428a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -59,9 +59,10 @@
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
* - 3.9.0 - Add support for memory query info about VRAM and GTT.
+ * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 9
+#define KMS_DRIVER_MINOR 10
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 51d759463384..106cf83c2e6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
bool kernel = false;
int r;
+ /* reject invalid gem flags */
+ if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED|
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+ r = -EINVAL;
+ goto error_unlock;
+ }
+ /* reject invalid gem domains */
+ if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
+ AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GDS |
+ AMDGPU_GEM_DOMAIN_GWS |
+ AMDGPU_GEM_DOMAIN_OA)) {
+ r = -EINVAL;
+ goto error_unlock;
+ }
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
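
Both new checks follow the standard ioctl whitelist idiom: AND the user-supplied word with the complement of every accepted bit, and reject the request if anything survives. A generic restatement (the helper name is illustrative):

#include <linux/types.h>

static bool ioctl_flags_valid(u64 flags, u64 accepted)
{
	/* Any bit outside the whitelist makes the AND non-zero. */
	return (flags & ~accepted) == 0;
}
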
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 31375bdde6f1..011800f621c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
}
}
- /* disble sdma engine before programing it */
+	/* disable sdma engine before programming it */
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index f55e45b52fbc..33b504bafb88 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->device == 0x6604) &&
+ (adev->pdev->subsystem_vendor == 0x1028) &&
+ (adev->pdev->subsystem_device == 0x066F)) {
+ max_sclk = 75000;
+ }
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 50bdb24ef8d6..4a785d6acfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
if (adev->rev_id != 0x00) {
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ adev->pg_flags |=
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 8cf71f3c6d0e..261b828ad590 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ AMD_PG_STATE_GATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index bad4d80cb711..f9d665550d3e 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
clk_prepare_enable(hwdev->pxlclk);
- /* mclk needs to be set to the same or higher rate than pxlclk */
- clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+ /* We rely on firmware to set mclk to a sensible level. */
clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
hwdev->modeset(hwdev, &vm);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 488aedf5b58d..9f5513006eee 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
};
#define MALIDP_DE_DEFAULT_PREFETCH_START 5
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 414aada10fe5..d5aec082294c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -37,6 +37,8 @@
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE 0x010
#define MALIDP_LAYER_OFFSET 0x014
+#define MALIDP550_LS_ENABLE 0x01c
+#define MALIDP550_LS_R1_IN_SIZE 0x020
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
LAYER_V_VAL(plane->state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
+ if (mp->layer->id == DE_SMART)
+ malidp_hw_write(mp->hwdev,
+ LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+ mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
- /* Skip the features which the SMART layer doesn't have */
- if (id == DE_SMART)
+ if (id == DE_SMART) {
+ /*
+ * Enable the first rectangle in the SMART layer to be
+ * able to use it as a drm plane.
+ */
+ malidp_hw_write(malidp->dev, 1,
+ plane->layer->base + MALIDP550_LS_ENABLE);
+ /* Skip the features which the SMART layer doesn't have. */
continue;
+ }
drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
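
The new R1_IN_SIZE write packs the source width and height into one register through the LAYER_H_VAL/LAYER_V_VAL macros. LAYER_V_VAL is defined earlier in the file as a 13-bit field at bit 16; LAYER_H_VAL is assumed here to be the matching 13-bit field at bit 0. A sketch of the packing:

static u32 pack_in_size(u32 w, u32 h)
{
	/* Assumed layout: width in bits [12:0], height in bits [28:16]. */
	return LAYER_H_VAL(w) | LAYER_V_VAL(h);
}
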
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index aff6d4a84e99..b816067a65c5 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -84,6 +84,7 @@
/* Stride register offsets relative to Lx_BASE */
#define MALIDP_DE_LG_STRIDE 0x18
#define MALIDP_DE_LV_STRIDE0 0x18
+#define MALIDP550_DE_LS_R1_STRIDE 0x28
/* macros to set values into registers */
#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 99144f879a4f..fad3d44e4642 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -149,6 +149,9 @@ static const struct edid_quirk {
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+
+	/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1 */
+ { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
};
/*
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4a6a2ed65732..b7d7721e72fa 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -41,6 +41,54 @@ enum {
INTEL_GVT_PCI_BAR_MAX,
};
+/* Per-byte bitmap of writable bits (RW or RW1C; the two kinds cannot
+ * co-exist within one byte) for the standard PCI configuration space
+ * (not the full 256 bytes).
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+ [PCI_COMMAND] = 0xff, 0x07,
+ [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */
+ [PCI_CACHE_LINE_SIZE] = 0xff,
+ [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+ [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
+ [PCI_INTERRUPT_LINE] = 0xff,
+};
+
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ *
+ * Use this function to write virtual cfg space memory.
+ * For standard cfg space, only RW bits can be changed,
+ * and we emulate the RW1C behavior of the PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+ u8 *src, unsigned int bytes)
+{
+ u8 *cfg_base = vgpu_cfg_space(vgpu);
+ u8 mask, new, old;
+ int i = 0;
+
+ for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+ mask = pci_cfg_space_rw_bmp[off + i];
+ old = cfg_base[off + i];
+ new = src[i] & mask;
+
+		/*
+		 * The PCI_STATUS high byte has RW1C bits; emulate the
+		 * clear-by-writing-1 behavior for these bits. Writing
+		 * a 0 to an RW1C bit has no effect.
+ */
+ if (off + i == PCI_STATUS + 1)
+ new = (~new & old) & mask;
+
+ cfg_base[off + i] = (old & ~mask) | new;
+ }
+
+ /* For other configuration space directly copy as it is. */
+ if (i < bytes)
+ memcpy(cfg_base + off + i, src + i, bytes - i);
+}
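
The PCI_STATUS branch is the subtle part of the loop above, so a worked example helps: under RW1C semantics a written 1 clears the bit and a written 0 leaves it alone, which is exactly what new = (~new & old) & mask computes. The values below are made up for illustration:

#include <linux/types.h>

static u8 rw1c_example(void)
{
	u8 old = 0xf8;			/* RW1C status bits currently set */
	u8 src = 0x18;			/* guest writes 1s to clear two bits */
	u8 mask = 0xf9;			/* RW1C byte of PCI_STATUS per the bitmap */
	u8 new = src & mask;		/* 0x18 */

	new = (~new & old) & mask;	/* (~0x18 & 0xf8) & 0xf9 = 0xe0 */
	return (old & ~mask) | new;	/* 0xe0: written 1s gone, rest kept */
}
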
+
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
{
int ret;
+ if (vgpu->failsafe)
+ return 0;
+
if (WARN_ON(bytes > 4))
return -EINVAL;
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (ret)
return ret;
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
default:
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
}
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 81076d8ec41a..da6bbca90d97 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
if (d_info == NULL)
return;
- gvt_err("opcode=0x%x %s sub_ops:",
+ gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
cmd >> (32 - d_info->op_len), d_info->name);
for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+ gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
s->ring_id, s->ring_start, s->ring_start + s->ring_size,
s->ring_head, s->ring_tail);
- gvt_err(" %s %s ip_gma(%08lx) ",
+ gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
"RING_BUFFER" : "BATCH_BUFFER",
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
if (s->ip_va == NULL) {
- gvt_err(" ip_va(NULL)");
+ gvt_dbg_cmd(" ip_va(NULL)");
return;
}
- gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8fde880c39..5419ae6ec633 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
return 0;
}
+static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+ {
+/* EDID with 1024x768 as its resolution */
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /* Standard Timings. All invalid */
+ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+ /* 18 Byte Data Blocks 1: invalid */
+ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0xef,
+ },
+ {
/* EDID with 1920x1200 as its resolution */
-static unsigned char virtual_dp_monitor_edid[] = {
- /*Header*/
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- /* Vendor & Product Identification */
- 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
- /* Version & Revision */
- 0x01, 0x04,
- /* Basic Display Parameters & Features */
- 0xa5, 0x34, 0x20, 0x78, 0x23,
- /* Color Characteristics */
- 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
- /* Established Timings: maximum resolution is 1024x768 */
- 0x21, 0x08, 0x00,
- /*
- * Standard Timings.
- * below new resolutions can be supported:
- * 1920x1080, 1280x720, 1280x960, 1280x1024,
- * 1440x900, 1600x1200, 1680x1050
- */
- 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
- 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
- /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
- 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
- 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
- /* 18 Byte Data Blocks 2: invalid */
- 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- /* 18 Byte Data Blocks 3: invalid */
- 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
- 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
- /* 18 Byte Data Blocks 4: invalid */
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
- 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
- /* Extension Block Count */
- 0x00,
- /* Checksum */
- 0x45,
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /*
+ * Standard Timings.
+ * below new resolutions can be supported:
+ * 1920x1080, 1280x720, 1280x960, 1280x1024,
+ * 1440x900, 1600x1200, 1680x1050
+ */
+ 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+ 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+ /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+ 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0x45,
+ },
};
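
The two EDID tables end in different checksum bytes (0xef vs 0x45) because an EDID base block must satisfy one rule: all 128 bytes sum to 0 modulo 256, so any change to the timing bytes changes the final byte. A sketch of how that last byte is derived:

#include <linux/types.h>

static u8 edid_checksum(const unsigned char block[128])
{
	u8 sum = 0;
	int i;

	for (i = 0; i < 127; i++)	/* every byte except the checksum */
		sum += block[i];
	return (u8)(0x100 - sum);	/* value that makes the total 0 mod 256 */
}
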
#define DPCD_HEADER_SIZE 0xb
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+ }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+ }
if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
}
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
}
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
- int type)
+ int type, unsigned int resolution)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ if (WARN_ON(resolution >= GVT_EDID_NUM))
+ return -EINVAL;
+
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
if (!port->edid)
return -ENOMEM;
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
return -ENOMEM;
}
- memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
EDID_SIZE);
port->edid->data_valid = true;
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
* Zero on success, negative error code if failed.
*
*/
-int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
intel_vgpu_init_i2c_edid(vgpu);
if (IS_SKYLAKE(dev_priv))
- return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+ return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
+ resolution);
else
- return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+ return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
+ resolution);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8b234ea961f6..d73de22102e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
int type;
};
+enum intel_vgpu_edid {
+ GVT_EDID_1024_768,
+ GVT_EDID_1920_1200,
+ GVT_EDID_NUM,
+};
+
+static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return "1024x768";
+ case GVT_EDID_1920_1200:
+ return "1920x1200";
+ default:
+ return "";
+ }
+}
+
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
-int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 1cb29b2d7dc6..933a7c211a1c 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
- firmware = vmalloc(size);
+ firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 28c92346db0e..6a5ff23ded90 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gma = g_gtt_index << GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
- if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
- "vgpu%d: found oob ggtt write, offset %x\n",
- vgpu->id, off)) {
+ if (!vgpu_gmadr_is_valid(vgpu, gma))
return 0;
- }
ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
@@ -2015,6 +2012,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
return create_scratch_page_tree(vgpu);
}
+static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type == type) {
+ vgpu->gvt->gtt.mm_free_page_table(mm);
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+ kfree(mm);
+ }
+ }
+}
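
intel_vgpu_free_mm() deletes and frees nodes while walking the list, which is only safe with the _safe iterator: it caches the successor before the loop body can destroy the current node. The same idiom with the entry-based variant and illustrative types:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
};

static void free_all(struct list_head *head)
{
	struct item *it, *tmp;

	/* tmp holds the successor, so kfree(it) cannot break the walk. */
	list_for_each_entry_safe(it, tmp, head, link) {
		list_del(&it->link);
		kfree(it);
	}
}
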
+
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
* @vgpu: a vGPU
@@ -2027,19 +2040,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
*/
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
- struct list_head *pos, *n;
- struct intel_vgpu_mm *mm;
-
ppgtt_free_all_shadow_page(vgpu);
release_scratch_page_tree(vgpu);
- list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, list);
- vgpu->gvt->gtt.mm_free_page_table(mm);
- list_del(&mm->list);
- list_del(&mm->lru_list);
- kfree(mm);
- }
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}
static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2322,6 +2327,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
int i;
ppgtt_free_all_shadow_page(vgpu);
+
+ /* Shadow pages are only created when there is no page
+ * table tracking data, so remove page tracking data after
+ * removing the shadow pages.
+ */
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+
if (!dmlr)
return;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ce9ac1569cbc..6dfc48b63b71 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -143,6 +143,8 @@ struct intel_vgpu {
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
+ bool pv_notified;
+ bool failsafe;
bool resetting;
void *sched_data;
@@ -202,18 +204,18 @@ struct intel_gvt_firmware {
};
struct intel_gvt_opregion {
- void __iomem *opregion_va;
+ void *opregion_va;
u32 opregion_pa;
};
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
char name[16];
- unsigned int max_instance;
unsigned int avail_instance;
unsigned int low_gm_size;
unsigned int high_gm_size;
unsigned int fence;
+ enum intel_vgpu_edid resolution;
};
struct intel_gvt {
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
+ __u64 resolution;
__s32 primary;
__u64 vgpu_id;
};
@@ -449,6 +452,11 @@ struct intel_gvt_ops {
};
+enum {
+ GVT_FAILSAFE_UNSUPPORTED_GUEST,
+ GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+};
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1d450627ff65..8e43395c748a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
info->size = size;
info->length = (i + 4) < end ? 4 : (end - i);
info->addr_mask = addr_mask;
+ info->ro_mask = ro_mask;
info->device = device;
info->read = read ? read : intel_vgpu_default_mmio_read;
info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -150,15 +151,44 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
#define fence_num_to_offset(num) \
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
+
+static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+{
+ switch (reason) {
+ case GVT_FAILSAFE_UNSUPPORTED_GUEST:
+ pr_err("Detected your guest driver doesn't support GVT-g.\n");
+ break;
+ case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
+		pr_err("Graphics resource is not enough for the guest\n");
+		break;
+	default:
+ break;
+ }
+ pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
+ vgpu->failsafe = true;
+}
+
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int fence_num, void *p_data, unsigned int bytes)
{
if (fence_num >= vgpu_fence_sz(vgpu)) {
- gvt_err("vgpu%d: found oob fence register access\n",
- vgpu->id);
- gvt_err("vgpu%d: total fence num %d access fence num %d\n",
- vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+
+		/* When the guest accesses oob fence regs without accessing
+		 * pv_info first, we treat it as a guest that does not
+		 * support GVT, and let the vgpu enter failsafe mode.
+ */
+ if (!vgpu->pv_notified)
+ enter_failsafe_mode(vgpu,
+ GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+ if (!vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: found oob fence register access\n",
+ vgpu->id);
+ gvt_err("vgpu%d: total fence %d, access fence %d\n",
+ vgpu->id, vgpu_fence_sz(vgpu),
+ fence_num);
+ }
memset(p_data, 0, bytes);
+ return -EINVAL;
}
return 0;
}
@@ -369,6 +399,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
+/* sorted in ascending order */
+static i915_reg_t force_nonpriv_white_list[] = {
+ GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
+ GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+ GEN8_CS_CHICKEN1,//_MMIO(0x2580)
+ _MMIO(0x2690),
+ _MMIO(0x2694),
+ _MMIO(0x2698),
+ _MMIO(0x4de0),
+ _MMIO(0x4de4),
+ _MMIO(0x4dfc),
+ GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
+ _MMIO(0x7014),
+ HDC_CHICKEN0,//_MMIO(0x7300)
+ GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
+ _MMIO(0x7700),
+ _MMIO(0x7704),
+ _MMIO(0x7708),
+ _MMIO(0x770c),
+ _MMIO(0xb110),
+ GEN8_L3SQCREG4,//_MMIO(0xb118)
+ _MMIO(0xe100),
+ _MMIO(0xe18c),
+ _MMIO(0xe48c),
+ _MMIO(0xe5f4),
+};
+
+/* a simple bsearch */
+static inline bool in_whitelist(unsigned int reg)
+{
+ int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
+ i915_reg_t *array = force_nonpriv_white_list;
+
+ while (left < right) {
+ int mid = (left + right)/2;
+
+ if (reg > array[mid].reg)
+ left = mid + 1;
+ else if (reg < array[mid].reg)
+ right = mid;
+ else
+ return true;
+ }
+ return false;
+}
+
+static int force_nonpriv_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 reg_nonpriv = *(u32 *)p_data;
+ int ret = -EINVAL;
+
+ if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return ret;
+ }
+
+ if (in_whitelist(reg_nonpriv)) {
+ ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ } else {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
+ vgpu->id, reg_nonpriv);
+ }
+ return ret;
+}
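
in_whitelist() is a textbook half-open binary search over the table above, which is why that table must stay sorted in ascending order as its comment demands. The same loop over a plain u32 array, with the classic midpoint overflow guard added as a note (the driver's table is far too small for overflow to matter there):

#include <linux/types.h>

static bool sorted_contains(const u32 *arr, int n, u32 key)
{
	int left = 0, right = n;		/* window is [left, right) */

	while (left < right) {
		int mid = left + (right - left) / 2;	/* overflow-safe midpoint */

		if (key > arr[mid])
			left = mid + 1;		/* discard lower half */
		else if (key < arr[mid])
			right = mid;		/* discard upper half */
		else
			return true;
	}
	return false;
}
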
+
static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -1001,6 +1099,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
if (invalid_read)
gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
offset, bytes, *(u32 *)p_data);
+ vgpu->pv_notified = true;
return 0;
}
@@ -1039,7 +1138,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
char vmid_str[20];
char display_ready_str[20];
- snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+ snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
env[0] = display_ready_str;
snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
@@ -1078,6 +1177,9 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
case _vgtif_reg(execlist_context_descriptor_lo):
case _vgtif_reg(execlist_context_descriptor_hi):
break;
+ case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
+ break;
default:
gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
@@ -1203,26 +1305,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
switch (cmd) {
- case 0x6:
- /**
- * "Read memory latency" command on gen9.
- * Below memory latency values are read
- * from skylake platform.
- */
- if (!*data0)
- *data0 = 0x1e1a1100;
- else
- *data0 = 0x61514b3d;
+ case GEN9_PCODE_READ_MEM_LATENCY:
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ /**
+ * "Read memory latency" command on gen9.
+ * Below memory latency values are read
+ * from skylake platform.
+ */
+ if (!*data0)
+ *data0 = 0x1e1a1100;
+ else
+ *data0 = 0x61514b3d;
+ }
+ break;
+ case SKL_PCODE_CDCLK_CONTROL:
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+ *data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
- case 0x5:
+ case GEN6_PCODE_READ_RC6VIDS:
*data0 |= 0x1;
break;
}
gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
vgpu->id, value, *data0);
-
- value &= ~(1 << 31);
+ /**
+ * PCODE_READY clear means ready for pcode read/write,
+	 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
+	 * always emulate pcode reads/writes as successful and ready for
+	 * access at any time, since we don't touch real physical registers here.
+ */
+ value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
@@ -1318,6 +1431,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
write_vreg(vgpu, offset, p_data, bytes);
+
+	/* When PPGTT mode is enabled, check whether the guest has called
+	 * pvinfo; if not, treat it as a non-GVT-g-aware guest and stop
+	 * emulating its cfg space, mmio, gtt, etc.
+ */
+ if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
+ (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
+ && !vgpu->pv_notified) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
@@ -1400,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_GM(reg, d, r, w) \
MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+#define MMIO_GM_RDR(reg, d, r, w) \
+ MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
+
#define MMIO_RO(reg, d, f, rm, r, w) \
MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
@@ -1419,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_RING_GM(prefix, d, r, w) \
MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+#define MMIO_RING_GM_RDR(prefix, d, r, w) \
+ MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
+
#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
@@ -1427,73 +1557,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+ intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);
- MMIO_RING_D(RING_HWSTAM, D_ALL);
+ MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
#define RING_REG(base) (base + 0x28)
- MMIO_RING_D(RING_REG, D_ALL);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x134)
- MMIO_RING_D(RING_REG, D_ALL);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
- MMIO_GM(0x2148, D_ALL, NULL, NULL);
- MMIO_GM(CCID, D_ALL, NULL, NULL);
- MMIO_GM(0x12198, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
- MMIO_RING_D(RING_TAIL, D_ALL);
- MMIO_RING_D(RING_HEAD, D_ALL);
- MMIO_RING_D(RING_CTL, D_ALL);
- MMIO_RING_D(RING_ACTHD, D_ALL);
- MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+ MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
#define RING_REG(base) (base + 0x29c)
- MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ ring_mode_mmio_write);
#undef RING_REG
- MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
- MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_D(GAM_ECOCHK, D_ALL);
- MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x9030, D_ALL);
- MMIO_D(0x20a0, D_ALL);
- MMIO_D(0x2420, D_ALL);
- MMIO_D(0x2430, D_ALL);
- MMIO_D(0x2434, D_ALL);
- MMIO_D(0x2438, D_ALL);
- MMIO_D(0x243c, D_ALL);
- MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
@@ -2022,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(FORCEWAKE_ACK, D_ALL);
MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
- MMIO_D(GTFIFODBG, D_ALL);
- MMIO_D(GTFIFOCTL, D_ALL);
+ MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
MMIO_D(ECOBUS, D_ALL);
@@ -2080,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+ MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
MMIO_D(0x13812c, D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2159,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x1a054, D_ALL);
MMIO_D(0x44070, D_ALL);
-
- MMIO_D(0x215c, D_HSW_PLUS);
+ MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
- MMIO_D(GEN7_OACONTROL, D_HSW);
+ MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
+ MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
- MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(BCS_SWCTRL, D_ALL);
-
- MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2196,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+ MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2204,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+ MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2269,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
- MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(0x1c134, D_BDW_PLUS);
-
- MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
- MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
- MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
- NULL, NULL);
- MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
- NULL, NULL);
+ MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+ MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ ring_mode_mmio_write);
+ MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
- MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+ MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#define RING_REG(base) (base + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2303,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) (base + 0x234)
- MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+ NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
+ ~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x244)
- MMIO_RING_D(RING_REG, D_BDW_PLUS);
- MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x370)
@@ -2331,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
MMIO_D(0x1c054, D_BDW_PLUS);
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
+
MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
@@ -2341,14 +2501,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
- MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
@@ -2362,27 +2522,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
- MMIO_D(0xfdc, D_BDW);
- MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
- MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+ MMIO_D(0xfdc, D_BDW_PLUS);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb1f0, D_BDW);
- MMIO_D(0xb1c0, D_BDW);
+ MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb100, D_BDW);
- MMIO_D(0xb10c, D_BDW);
+ MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0xb110, D_BDW);
- MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+ NULL, force_nonpriv_write);
+
+ MMIO_D(0x22040, D_BDW_PLUS);
+ MMIO_D(0x44484, D_BDW_PLUS);
+ MMIO_D(0x4448c, D_BDW_PLUS);
- MMIO_D(0x83a4, D_BDW);
+ MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
- MMIO_D(0x8430, D_BDW);
+ MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x110000, D_BDW_PLUS);
@@ -2394,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
-
- MMIO_D(0x2248, D_BDW);
-
+ MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2420,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
- MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2578,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
MMIO_D(0xd08, D_SKL);
- MMIO_D(0x20e0, D_SKL);
- MMIO_D(0x20ec, D_SKL);
+ MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_D(0x4de0, D_SKL);
- MMIO_D(0x4de4, D_SKL);
- MMIO_D(0x4de8, D_SKL);
- MMIO_D(0x4dec, D_SKL);
- MMIO_D(0x4df0, D_SKL);
- MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+ MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
MMIO_D(0x45008, D_SKL);
@@ -2611,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x65f08, D_SKL);
MMIO_D(0x320f0, D_SKL);
- MMIO_D(_REG_VCS2_EXCC, D_SKL);
+ MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL);
MMIO_D(0x71034, D_SKL);
MMIO_D(0x72034, D_SKL);
@@ -2624,6 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
MMIO_D(0x44500, D_SKL);
+ MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0f7f5d97f582..84d801638ede 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;
- page = pfn_to_page(pfn);
- if (is_error_page(page))
+ if (unlikely(!pfn_valid(pfn)))
return -EFAULT;
+ page = pfn_to_page(pfn);
daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr))
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence);
+ "fence: %d\nresolution: %s\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution));
}
static MDEV_TYPE_ATTR_RO(available_instances);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 4df078bc5d04..60b698cb8365 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes, bool read)
+{
+ struct intel_gvt *gvt = NULL;
+ void *pt = NULL;
+ unsigned int offset = 0;
+
+ if (!vgpu || !p_data)
+ return;
+
+ gvt = vgpu->gvt;
+ mutex_lock(&gvt->lock);
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+ if (reg_is_mmio(gvt, offset)) {
+ if (read)
+ intel_vgpu_default_mmio_read(vgpu, offset, p_data,
+ bytes);
+ else
+ intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ } else if (reg_is_gtt(gvt, offset) &&
+ vgpu->gtt.ggtt_mm->virtual_page_table) {
+ offset -= gvt->device_info.gtt_start_offset;
+ pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+ if (read)
+ memcpy(p_data, pt, bytes);
+ else
+ memcpy(pt, p_data, bytes);
+
+ } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+	/* Since we enter failsafe mode early during guest boot, the
+	 * guest may not have had a chance to set up its ppgtt table, so
+	 * there should not be any wp pages for the guest. Keep the wp
+	 * related code here in case we need to handle it in the future.
+ */
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+			/* remove write protection to prevent future traps */
+ intel_vgpu_clean_guest_page(vgpu, gp);
+ if (read)
+ intel_gvt_hypervisor_read_gpa(vgpu, pa,
+ p_data, bytes);
+ else
+ intel_gvt_hypervisor_write_gpa(vgpu, pa,
+ p_data, bytes);
+ }
+ }
+ mutex_unlock(&gvt->lock);
+}
+
/**
* intel_vgpu_emulate_mmio_read - emulate MMIO read
* @vgpu: a vGPU
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
unsigned int offset = 0;
int ret = -EINVAL;
+
+ if (vgpu->failsafe) {
+ failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
+ return 0;
+ }
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -188,6 +245,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
u32 old_vreg = 0, old_sreg = 0;
int ret = -EINVAL;
+ if (vgpu->failsafe) {
+ failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
+ return 0;
+ }
+
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack)
- gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+ gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+ vgpu->mmio.disable_warn_untrack = false;
}
/**
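The failsafe path above routes each guest access purely by address range: offsets inside the MMIO window go through the default read/write handlers, GGTT offsets are served straight from the shadow page table, and anything left over is forwarded to guest physical memory. A minimal user-space sketch of that routing, with made-up window constants standing in for the real device_info fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical layout constants; the real values come from device_info. */
#define MMIO_SIZE 0x200000u
#define GTT_START 0x800000u
#define GTT_SIZE  0x100000u

static uint8_t mmio_space[MMIO_SIZE];
static uint8_t ggtt_table[GTT_SIZE];

/* Route one failsafe access by address range. */
static void failsafe_rw(uint64_t offset, void *buf, size_t len, int read)
{
        uint8_t *backing = NULL;

        if (offset + len <= MMIO_SIZE)
                backing = mmio_space + offset;               /* default MMIO */
        else if (offset >= GTT_START &&
                 offset + len <= GTT_START + GTT_SIZE)
                backing = ggtt_table + (offset - GTT_START); /* shadow GGTT */

        if (!backing)
                return; /* else: forward to guest physical memory */

        if (read)
                memcpy(buf, backing, len);
        else
                memcpy(backing, buf, len);
}

int main(void)
{
        uint32_t val = 0xabcd1234, out = 0;

        failsafe_rw(GTT_START + 8, &val, sizeof(val), 0); /* write a PTE */
        failsafe_rw(GTT_START + 8, &out, sizeof(out), 1); /* read it back */
        printf("%#x\n", out);
        return 0;
}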
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d9fb41ab7119..5d1caf9daba9 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -27,7 +27,6 @@
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
- void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
u8 *buf;
int i;
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
- memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
- INTEL_GVT_OPREGION_SIZE);
+ memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
+ INTEL_GVT_OPREGION_SIZE);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2b3a642284b6..73f052a4f424 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x24e0), 0, false},
+ {RCS, _MMIO(0x24e4), 0, false},
+ {RCS, _MMIO(0x24e8), 0, false},
+ {RCS, _MMIO(0x24ec), 0, false},
+ {RCS, _MMIO(0x24f0), 0, false},
+ {RCS, _MMIO(0x24f4), 0, false},
+ {RCS, _MMIO(0x24f8), 0, false},
+ {RCS, _MMIO(0x24fc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x24e0), 0, false},
+ {RCS, _MMIO(0x24e4), 0, false},
+ {RCS, _MMIO(0x24e8), 0, false},
+ {RCS, _MMIO(0x24ec), 0, false},
+ {RCS, _MMIO(0x24f0), 0, false},
+ {RCS, _MMIO(0x24f4), 0, false},
+ {RCS, _MMIO(0x24f8), 0, false},
+ {RCS, _MMIO(0x24fc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 31d2240fdb1f..811a84bdbafb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -137,6 +137,9 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
+ if (unlikely(!workload))
+ return NOTIFY_OK;
+
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
intel_gvt_load_render_mmio(workload->vgpu,
@@ -146,6 +149,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
+ /* A status of -EINPROGRESS means this workload hit no
+ * issue during dispatch, so on SCHEDULE_OUT we can set the
+ * status to zero for good. Any other status means something
+ * went wrong during dispatch, and the status must not be
+ * overwritten with zero
+ */
+ if (workload->status == -EINPROGRESS)
+ workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -357,15 +369,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;
- if (!workload->status && !vgpu->resetting) {
+ /* For a workload with a request, wait for the context
+ * switch to make sure the request has completed.
+ * For a workload without a request, complete it directly.
+ */
+ if (workload->req) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
- update_guest_context(workload);
+ i915_gem_request_put(fetch_and_zero(&workload->req));
+
+ if (!workload->status && !vgpu->resetting) {
+ update_guest_context(workload);
- for_each_set_bit(event, workload->pending_events,
- INTEL_GVT_EVENT_MAX)
- intel_vgpu_trigger_virtual_event(vgpu, event);
+ for_each_set_bit(event, workload->pending_events,
+ INTEL_GVT_EVENT_MAX)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ }
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -395,7 +415,6 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- long lret;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -444,23 +463,24 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
-
- lret = i915_wait_request(workload->req,
+retry:
+ i915_wait_request(workload->req,
0, MAX_SCHEDULE_TIMEOUT);
- if (lret < 0) {
- workload->status = lret;
- gvt_err("fail to wait workload, skip\n");
- } else {
- workload->status = 0;
+ /* i915 has a replay mechanism: a request is replayed after an
+ * i915 reset, so the seqno will be updated eventually. If the
+ * seqno has not advanced after the wait, the replay may still
+ * be in progress and we can simply wait again.
+ */
+ if (!i915_gem_request_completed(workload->req)) {
+ gvt_dbg_sched("workload %p not completed, wait again\n",
+ workload);
+ goto retry;
}
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status);
- if (workload->req)
- i915_gem_request_put(fetch_and_zero(&workload->req));
-
complete_current_workload(gvt, ring_id);
if (need_force_wake)
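The new retry loop leans on i915's replay behaviour: after a GPU reset an outstanding request is replayed, so its seqno is guaranteed to advance eventually, and a wait that returns with the request still incomplete just means the replay is in flight. A runnable toy model of that wait-until-really-done pattern (the request/seqno types here are stand-ins, not the i915 API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: a request completes once its seqno reaches a target. */
struct request { int seqno, target; };

static void wait_request(struct request *req)
{
        req->seqno++;           /* pretend one wait advances the seqno */
}

static bool request_completed(const struct request *req)
{
        return req->seqno >= req->target;
}

static void wait_workload(struct request *req)
{
        /* A wait can return while a reset/replay is still in flight, so
         * only a completed seqno proves the request really finished. */
        do {
                wait_request(req);
        } while (!request_completed(req));
}

int main(void)
{
        struct request req = { .seqno = 0, .target = 3 };

        wait_workload(&req);
        printf("completed at seqno %d\n", req.seqno);
        return 0;
}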
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 95a97aa0051e..41cfa5ccae84 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+static struct {
+ unsigned int low_mm;
+ unsigned int high_mm;
+ unsigned int fence;
+ enum intel_vgpu_edid edid;
+ char *name;
+} vgpu_types[] = {
+/* Fixed vGPU type table */
+ { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+ { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
+ { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
+ { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+};
+
/**
* intel_gvt_init_vgpu_types - initialize vGPU type list
* @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
unsigned int min_low;
/* vGPU type name is defined as GVTg_Vx_y which contains
- * physical GPU generation type and 'y' means maximum vGPU
- * instances user can create on one physical GPU for this
- * type.
+ * physical GPU generation type (e.g. V4 as BDW server, V5 as
+ * SKL server).
*
* Depend on physical SKU resource, might see vGPU types like
* GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
*/
low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = 4;
+ num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
min_low = MB_TO_BYTES(32);
for (i = 0; i < num_types; ++i) {
- if (low_avail / min_low == 0)
+ if (low_avail / vgpu_types[i].low_mm == 0)
break;
- gvt->types[i].low_gm_size = min_low;
- gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
- gvt->types[i].fence = 4;
- gvt->types[i].max_instance = min(low_avail / min_low,
- high_avail / gvt->types[i].high_gm_size);
- gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
+ gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
+ gvt->types[i].fence = vgpu_types[i].fence;
+ gvt->types[i].resolution = vgpu_types[i].edid;
+ gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
+ high_avail / vgpu_types[i].high_mm);
if (IS_GEN8(gvt->dev_priv))
- sprintf(gvt->types[i].name, "GVTg_V4_%u",
- gvt->types[i].max_instance);
+ sprintf(gvt->types[i].name, "GVTg_V4_%s",
+ vgpu_types[i].name);
else if (IS_GEN9(gvt->dev_priv))
- sprintf(gvt->types[i].name, "GVTg_V5_%u",
- gvt->types[i].max_instance);
+ sprintf(gvt->types[i].name, "GVTg_V5_%s",
+ vgpu_types[i].name);
- min_low <<= 1;
- gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+ i, gvt->types[i].name,
gvt->types[i].avail_instance,
gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence);
+ gvt->types[i].high_gm_size, gvt->types[i].fence,
+ vgpu_edid_str(gvt->types[i].resolution));
}
gvt->num_types = i;
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
int i;
unsigned int low_gm_avail, high_gm_avail, fence_avail;
- unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+ unsigned int low_gm_min, high_gm_min, fence_min;
/* Need to depend on maximum hw resource size but keep on
* static config for now.
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
fence_min = fence_avail / gvt->types[i].fence;
- total_min = min(min(low_gm_min, high_gm_min), fence_min);
- gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
- total_min);
+ gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
+ fence_min);
- gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name,
gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence);
}
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- ret = intel_vgpu_init_display(vgpu);
+ ret = intel_vgpu_init_display(vgpu, param->resolution);
if (ret)
goto out_clean_gtt;
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
param.fence_sz = type->fence;
+ param.resolution = type->resolution;
/* XXX current param based on MB */
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
populate_pvinfo_page(vgpu);
intel_vgpu_reset_display(vgpu);
- if (dmlr)
+ if (dmlr) {
intel_vgpu_reset_cfg_space(vgpu);
+ /* only reset the failsafe mode when dmlr reset */
+ vgpu->failsafe = false;
+ vgpu->pv_notified = false;
+ }
}
vgpu->resetting = false;
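With the fixed type table, the instance count is no longer encoded in the type name; it is simply the available aperture divided by the per-instance footprint, bounded by both the low and high GM pools. A runnable sketch of that computation, with the pool sizes picked arbitrarily for illustration:

#include <stdio.h>

#define MB(x) ((x) * 1024u * 1024u)

/* Fixed vGPU type table, mirroring the low/high memory per instance. */
static const struct { unsigned low_mm, high_mm; const char *name; } types[] = {
        { MB(64),  MB(512),  "GVTg_V5_8" },
        { MB(128), MB(512),  "GVTg_V5_4" },
        { MB(256), MB(1024), "GVTg_V5_2" },
        { MB(512), MB(2048), "GVTg_V5_1" },
};

int main(void)
{
        /* Hypothetical free pools; the real values come from the HW SKU. */
        unsigned low_avail = MB(512), high_avail = MB(2048);

        for (unsigned i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
                unsigned lo = low_avail / types[i].low_mm;
                unsigned hi = high_avail / types[i].high_mm;

                /* an instance needs both pools, so the smaller quota wins */
                printf("%s: %u instances\n", types[i].name, lo < hi ? lo : hi);
        }
        return 0;
}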
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index e10a4eda4078..1144e0c9e894 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
switch (format) {
case DRM_FORMAT_RGB565:
dev_dbg(drm->dev, "Setting up RGB565 mode\n");
- ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
ctrl |= CTRL_SET_WORD_LENGTH(0);
ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
break;
case DRM_FORMAT_XRGB8888:
dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
- ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
ctrl |= CTRL_SET_WORD_LENGTH(3);
/* Do not use packed pixels = one pixel per word instead. */
ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
@@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
return 0;
}
+static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_crtc *crtc = &mxsfb->pipe.crtc;
+ struct drm_device *drm = crtc->dev;
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ u32 reg;
+
+ reg = readl(mxsfb->base + LCDC_CTRL);
+
+ if (mxsfb->connector.display_info.num_bus_formats)
+ bus_format = mxsfb->connector.display_info.bus_formats[0];
+
+ reg &= ~CTRL_BUS_WIDTH_MASK;
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT);
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
+ break;
+ default:
+ dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
+ break;
+ }
+ writel(reg, mxsfb->base + LCDC_CTRL);
+}
+
static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
{
u32 reg;
@@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
if (m->flags & DRM_MODE_FLAG_PVSYNC)
vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
- if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ /* Make sure Data Enable is active-high by default */
+ if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ /*
+ * The DRM_BUS_FLAG_PIXDATA_* defines are controller-centric,
+ * while the controller's VDCTRL0_DOTCLK setting is display-centric.
+ * Drive on positive edge -> display samples on falling edge
+ * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
+ */
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
+ mxsfb_set_bus_fmt(mxsfb);
+
/* Frame length in lines. */
writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
@@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
mxsfb->base + LCDC_VDCTRL2);
- writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) |
- SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end),
+ writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
+ SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
mxsfb->base + LCDC_VDCTRL3);
writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
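The VDCTRL3 change widens the wait counts from the back porch alone to sync length plus back porch: the counter runs from the start of the sync pulse to the first active pixel, so htotal - hsync_start (and the vertical equivalent) is the right span. A worked example using standard 640x480 horizontal timings (illustrative numbers, not taken from this driver):

#include <stdio.h>

int main(void)
{
        /* Standard 640x480 horizontal timings, in pixels. */
        int hsync_start = 656, hsync_end = 752, htotal = 800;

        int sync = hsync_end - hsync_start;   /*  96: sync pulse width  */
        int back = htotal - hsync_end;        /*  48: back porch        */
        int wait = htotal - hsync_start;      /* 144: sync + back porch */

        /* The old expression counted only the back porch. */
        printf("old wait=%d, new wait=%d (= %d sync + %d back)\n",
               back, wait, sync, back);
        return 0;
}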
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 5ac712325c75..d1b9c34c7c00 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
{
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+ drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
+ drm_panel_enable(mxsfb->panel);
}
static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+ drm_panel_disable(mxsfb->panel);
mxsfb_crtc_disable(mxsfb);
+ drm_panel_unprepare(mxsfb->panel);
}
static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index fa8d17399407..b8e81422d4e2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm,
int mxsfb_create_output(struct drm_device *drm)
{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
struct device_node *ep_np = NULL;
struct of_endpoint ep;
int ret;
@@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm)
}
}
+ if (!mxsfb->panel)
+ return -EPROBE_DEFER;
+
return 0;
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
index 31d62cd0d3d7..66a6ba9ec533 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
@@ -44,6 +44,7 @@
#define CTRL_DATA_SELECT (1 << 16)
#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
+#define CTRL_BUS_WIDTH_MASK (0x3 << 10)
#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
#define CTRL_MASTER (1 << 5)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c35d813..ee5883f59be5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct drm_gem_object *obj = buffer->priv;
int ret = 0;
- if (WARN_ON(!obj->filp))
- return -EINVAL;
-
ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d12b8978142f..72e1588580a1 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->device == 0x6604) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x066F)) {
+ max_sclk = 75000;
+ }
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index b5bfbe50bd87..b0ff304ce3dc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
{
const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
struct rcar_du_device *rcdu = crtc->group->dev;
+ struct vsp1_du_lif_config cfg = {
+ .width = mode->hdisplay,
+ .height = mode->vdisplay,
+ };
struct rcar_du_plane_state state = {
.state = {
.crtc = &crtc->crtc,
@@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
*/
crtc->group->need_restart = true;
- vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay);
+ vsp1_du_setup_lif(crtc->vsp->vsp, &cfg);
}
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
{
- vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0);
+ vsp1_du_setup_lif(crtc->vsp->vsp, NULL);
}
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 93505bcfdf4b..c92faa8f7560 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ unsigned long flags;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
LCDC_PALETTE_LOAD_MODE_MASK);
+
+ /* There is no real chance of a race here, as the timestamp
+ * is taken before the raster DMA is started. The spin-lock is
+ * taken to provide a memory barrier after taking the timestamp
+ * and to avoid a context switch between taking the timestamp
+ * and enabling the raster.
+ */
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ tilcdc_crtc->last_vblank = ktime_get();
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
drm_crtc_vblank_on(crtc);
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
}
drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = 0;
tilcdc_crtc->enabled = false;
mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- unsigned long flags;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
drm_framebuffer_reference(fb);
crtc->primary->fb = fb;
+ tilcdc_crtc->event = event;
- spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ mutex_lock(&tilcdc_crtc->enable_lock);
- if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+ if (tilcdc_crtc->enabled) {
+ unsigned long flags;
ktime_t next_vblank;
s64 tdiff;
- next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
tilcdc_crtc->next_fb = fb;
- }
-
- if (tilcdc_crtc->next_fb != fb)
- set_scanout(crtc, fb);
+ else
+ set_scanout(crtc, fb);
- tilcdc_crtc->event = event;
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+ }
- spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+ mutex_unlock(&tilcdc_crtc->enable_lock);
return 0;
}
@@ -1047,5 +1058,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
fail:
tilcdc_crtc_destroy(crtc);
- return -ENOMEM;
+ return ret;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 81a80c82f1bd..bd0d1988feb2 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -543,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
/*
* In case a device driver's probe() fails (e.g.,
* util_probe() -> vmbus_open() returns -ENOMEM) and the device is
- * rescinded later (e.g., we dynamically disble an Integrated Service
+ * rescinded later (e.g., we dynamically disable an Integrated Service
* in Hyper-V Manager), the driver's remove() invokes vmbus_close():
* here we should skip most of the below cleanup work.
*/
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 0652281662a8..78792b4d6437 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
u8 *tmp_buf;
int len = 0;
int xfersz = brcmstb_i2c_get_xfersz(dev);
+ u32 cond, cond_per_msg;
if (dev->is_suspended)
return -EBUSY;
@@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
pmsg->buf ? pmsg->buf[0] : '0', pmsg->len);
if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART))
- brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP));
+ cond = ~COND_START_STOP;
else
- brcmstb_set_i2c_start_stop(dev,
- COND_RESTART | COND_NOSTOP);
+ cond = COND_RESTART | COND_NOSTOP;
+
+ brcmstb_set_i2c_start_stop(dev, cond);
/* Send slave address */
if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
}
}
+ cond_per_msg = cond;
+
/* Perform data transfer */
while (len) {
bytes_to_xfer = min(len, xfersz);
- if (len <= xfersz && i == (num - 1))
- brcmstb_set_i2c_start_stop(dev,
- ~(COND_START_STOP));
+ if (len <= xfersz) {
+ if (i == (num - 1))
+ cond_per_msg = cond_per_msg &
+ ~(COND_RESTART | COND_NOSTOP);
+ else
+ cond_per_msg = cond;
+ } else {
+ cond_per_msg = (cond_per_msg & ~COND_RESTART) |
+ COND_NOSTOP;
+ }
+
+ brcmstb_set_i2c_start_stop(dev, cond_per_msg);
rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf,
bytes_to_xfer, pmsg);
@@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
len -= bytes_to_xfer;
tmp_buf += bytes_to_xfer;
+
+ cond_per_msg = COND_NOSTART | COND_NOSTOP;
}
}
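The rework keeps one base condition per message and refines it per chunk: middle chunks are forced to carry neither START nor STOP edges, the last chunk of a non-final message restores the message's base condition, and only the last chunk of the last message may emit the STOP. A compact, runnable model of that state machine, with symbolic flag values standing in for the controller bits:

#include <stdio.h>

/* Symbolic stand-ins for the controller's condition bits. */
enum { COND_RESTART = 1, COND_NOSTOP = 2, COND_NOSTART = 4 };

int main(void)
{
        int num_msgs = 2, xfersz = 8;
        int msg_lens[] = { 20, 4 };

        for (int i = 0; i < num_msgs; i++) {
                /* base condition for this message: RESTART, no STOP yet */
                int cond = COND_RESTART | COND_NOSTOP;
                int per_chunk = cond;

                for (int len = msg_lens[i]; len > 0; len -= xfersz) {
                        if (len <= xfersz)      /* last chunk of message */
                                per_chunk = (i == num_msgs - 1)
                                        ? per_chunk & ~(COND_RESTART | COND_NOSTOP)
                                        : cond; /* more messages follow */
                        else                    /* middle chunk: no edges */
                                per_chunk = (per_chunk & ~COND_RESTART) |
                                            COND_NOSTOP;

                        printf("msg %d chunk cond=%#x\n", i,
                               (unsigned)per_chunk);
                        per_chunk = COND_NOSTART | COND_NOSTOP;
                }
        }
        return 0;
}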
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 091c24925c9c..846ea57f85af 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -90,6 +90,7 @@ struct dw_i2c_dev {
void __iomem *base;
struct completion cmd_complete;
struct clk *clk;
+ struct reset_control *rst;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
struct dw_pci_controller *controller;
int cmd_err;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index df0ff7d82b49..d8665098dce9 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -38,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/io.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/platform_data/i2c-designware.h>
@@ -199,6 +200,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->irq = irq;
platform_set_drvdata(pdev, dev);
+ dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(dev->rst)) {
+ if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
+ reset_control_deassert(dev->rst);
+ }
+
if (pdata) {
dev->clk_freq = pdata->i2c_scl_freq;
} else {
@@ -235,12 +244,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
&& dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
dev_err(&pdev->dev,
"Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
- return -EINVAL;
+ r = -EINVAL;
+ goto exit_reset;
}
r = i2c_dw_probe_lock_support(dev);
if (r)
- return r;
+ goto exit_reset;
dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
@@ -286,10 +296,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
r = i2c_dw_probe(dev);
- if (r && !dev->pm_runtime_disabled)
- pm_runtime_disable(&pdev->dev);
+ if (r)
+ goto exit_probe;
return r;
+
+exit_probe:
+ if (!dev->pm_runtime_disabled)
+ pm_runtime_disable(&pdev->dev);
+exit_reset:
+ if (!IS_ERR_OR_NULL(dev->rst))
+ reset_control_assert(dev->rst);
+ return r;
}
static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -306,6 +324,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
if (!dev->pm_runtime_disabled)
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR_OR_NULL(dev->rst))
+ reset_control_assert(dev->rst);
i2c_dw_remove_lock_support(dev);
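The probe fix follows the usual goto-unwind idiom: each later failure jumps to a label that releases everything acquired before it, so the optional reset line is re-asserted on every error path instead of only on remove. The shape of that idiom, reduced to toy resources:

#include <stdio.h>

/* Toy resources illustrating goto-based unwinding on probe failure. */
static int deassert_reset(void)   { puts("reset deasserted"); return 0; }
static void assert_reset(void)    { puts("reset asserted"); }
static int enable_runtime(void)   { puts("runtime pm enabled"); return 0; }
static void disable_runtime(void) { puts("runtime pm disabled"); }

static int probe(int fail_late)
{
        int r = deassert_reset();

        if (r)
                return r;

        r = enable_runtime();
        if (r)
                goto exit_reset;

        if (fail_late) {        /* e.g. the final registration fails */
                r = -1;
                goto exit_runtime;
        }
        return 0;

exit_runtime:
        disable_runtime();
exit_reset:
        assert_reset();         /* reached from every later failure */
        return r;
}

int main(void)
{
        printf("probe -> %d\n", probe(1));
        return 0;
}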
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index cbd93ce0661f..736a82472101 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -457,7 +457,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
int_status = readl(i2c->regs + HSI2C_INT_STATUS);
writel(int_status, i2c->regs + HSI2C_INT_STATUS);
- trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
/* handle interrupt related to the transfer status */
if (i2c->variant->hw == HSI2C_EXYNOS7) {
@@ -482,11 +481,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
goto stop;
}
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) {
i2c->state = -EAGAIN;
goto stop;
}
} else if (int_status & HSI2C_INT_I2C) {
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
if (trans_status & HSI2C_NO_DEV_ACK) {
dev_dbg(i2c->dev, "No ACK from device\n");
i2c->state = -ENXIO;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 2aa61bbbd307..73b97c71a484 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
wdata1 |= *buf++ << ((i - 4) * 8);
writel(wdata0, i2c->regs + REG_TOK_WDATA0);
- writel(wdata0, i2c->regs + REG_TOK_WDATA1);
+ writel(wdata1, i2c->regs + REG_TOK_WDATA1);
dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
wdata0, wdata1, len);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 4a7d9bc2142b..45d61714c81b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -172,14 +172,6 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = {
.max_comb_2nd_msg_len = 31,
};
-static const struct i2c_adapter_quirks mt8173_i2c_quirks = {
- .max_num_msgs = 65535,
- .max_write_len = 65535,
- .max_read_len = 65535,
- .max_comb_1st_msg_len = 65535,
- .max_comb_2nd_msg_len = 65535,
-};
-
static const struct mtk_i2c_compatible mt6577_compat = {
.quirks = &mt6577_i2c_quirks,
.pmic_i2c = 0,
@@ -199,7 +191,6 @@ static const struct mtk_i2c_compatible mt6589_compat = {
};
static const struct mtk_i2c_compatible mt8173_compat = {
- .quirks = &mt8173_i2c_quirks,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 8f11d347b3ec..c811af4c8d81 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
}
if (riic->is_last || riic->err) {
- riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+ riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ } else {
+ /* Transfer is complete, but do not send STOP */
+ riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
+ complete(&riic->msg_done);
}
return IRQ_HANDLED;
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 83768e85a919..2178266bca79 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
while (muxc->num_adapters) {
struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
struct i2c_mux_priv *priv = adap->algo_data;
+ struct device_node *np = adap->dev.of_node;
muxc->adapter[muxc->num_adapters] = NULL;
@@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
i2c_del_adapter(adap);
+ of_node_put(np);
kfree(priv);
}
}
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 1eef56a89b1f..f96601268f71 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
static int __init crossbar_of_init(struct device_node *node)
{
- int i, size, max = 0, reserved = 0, entry;
+ u32 max = 0, entry, reg_size;
+ int i, size, reserved = 0;
const __be32 *irqsr;
int ret = -ENOMEM;
@@ -275,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
if (!cb->register_offsets)
goto err_irq_map;
- of_property_read_u32(node, "ti,reg-size", &size);
+ of_property_read_u32(node, "ti,reg-size", &reg_size);
- switch (size) {
+ switch (reg_size) {
case 1:
cb->write = crossbar_writeb;
break;
@@ -303,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node)
continue;
cb->register_offsets[i] = reserved;
- reserved += size;
+ reserved += reg_size;
}
of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
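The crossbar change is a typing fix: ti,reg-size was being read into the int `size` that earlier held a property length, but of_property_read_u32() wants a dedicated u32, so the register size now gets its own correctly typed variable. A distilled user-space illustration of why that matters (hypothetical values, not the DT properties themselves):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for of_property_read_u32(): writes through a u32 pointer. */
static int read_u32_prop(const char *name, uint32_t *out)
{
        (void)name;
        *out = 4;               /* pretend the DT said reg-size = 4 */
        return 0;
}

int main(void)
{
        int size = 12;          /* earlier property length, still needed */
        uint32_t reg_size;      /* the fix: dedicated, correctly typed   */

        /* read_u32_prop("ti,reg-size", &size) would need a pointer cast,
         * breaks wherever int and uint32_t differ, and clobbers state
         * the offset bookkeeping still relies on. */
        read_u32_prop("ti,reg-size", &reg_size);

        printf("size=%d reg_size=%u\n", size, reg_size);
        return 0;
}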
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 23201004fd7a..f77f840d2b5f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
}
+static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+ struct its_node *its = data;
+
+ /* On QDF2400, the size of the ITE is 16 bytes */
+ its->ite_size = 16;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -1618,6 +1626,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_cavium_23144,
},
#endif
+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
+ {
+ .desc = "ITS: QDF2400 erratum 0065",
+ .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_qdf2400_e0065,
+ },
+#endif
{
}
};
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 11e13c56126f..2da3ff650e1d 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 409849165838..f64a36007800 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
}
}
} else {
- // Disble B channel interrupts
+ // Disable B channel interrupts
st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);
// Disable B channel FIFOs
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 3f041b187033..f757cef293f8 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
* To get all the fields, copy all archdata
*/
dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
+ dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
#endif /* CONFIG_PCI */
#ifdef DEBUG
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index a126919ed102..5d13930f0f22 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -4,7 +4,6 @@
#include <linux/blkdev.h>
#include <linux/errno.h>
-#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4ffd1eb8f44..dfb75979e455 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
struct dm_offload *o = container_of(cb, struct dm_offload, cb);
struct bio_list list;
struct bio *bio;
+ int i;
INIT_LIST_HEAD(&o->cb.list);
if (unlikely(!current->bio_list))
return;
- list = *current->bio_list;
- bio_list_init(current->bio_list);
-
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set) {
- bio_list_add(current->bio_list, bio);
- continue;
+ for (i = 0; i < 2; i++) {
+ list = current->bio_list[i];
+ bio_list_init(&current->bio_list[i]);
+
+ while ((bio = bio_list_pop(&list))) {
+ struct bio_set *bs = bio->bi_pool;
+ if (unlikely(!bs) || bs == fs_bio_set) {
+ bio_list_add(&current->bio_list[i], bio);
+ continue;
+ }
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_add(&bs->rescue_list, bio);
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ spin_unlock(&bs->rescue_lock);
}
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
}
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 2b13117fb918..321ecac23027 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
bm_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (ret == -EAGAIN) {
- memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
s = read_resync_info(mddev, bm_lockres);
if (s) {
pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
dlm_release_lockspace(cinfo->lockspace, 2);
+ kfree(cinfo);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 548d1b8014f8..f6ae1d67bcd0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_flush_request);
-void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct mddev *mddev = cb->data;
- md_wakeup_thread(mddev->thread);
- kfree(cb);
-}
-EXPORT_SYMBOL(md_unplug);
-
static inline struct mddev *mddev_get(struct mddev *mddev)
{
atomic_inc(&mddev->active);
@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
- sb->super_offset = rdev->sb_start;
+ sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
do {
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
- (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
- mddev->max_disks = MD_SB_DISKS;
-
if (mddev->persistent) {
- mddev->flags = 0;
- mddev->sb_flags = 0;
+ mddev->max_disks = MD_SB_DISKS;
+ mddev->flags = 0;
+ mddev->sb_flags = 0;
}
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
- if (!rv)
- revalidate_disk(mddev->gendisk);
+ if (!rv) {
+ if (mddev->queue) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
+ }
return rv;
}
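Two of the md fixes above are byte-order bugs: an on-disk little-endian field stored without cpu_to_le64(), and a 32-bit field read with the 64-bit accessor. On little-endian hosts both mistakes are invisible; on big-endian hosts they corrupt the value or the comparison. A user-space sketch of the correct pairing, using the glibc <endian.h> converters in place of the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* On-disk superblock fields are stored little-endian. */
struct ondisk_sb {
        uint64_t super_offset;
        uint32_t layout;
};

int main(void)
{
        struct ondisk_sb sb;
        uint64_t sb_start = 8;
        uint32_t layout = 2;

        /* Correct: convert on store, and read back with the accessor
         * matching the field's width. Assigning sb_start directly, or
         * reading layout with le64toh(), only works by accident on
         * little-endian machines. */
        sb.super_offset = htole64(sb_start);
        sb.layout = htole32(layout);

        printf("super_offset=%llu layout=%u\n",
               (unsigned long long)le64toh(sb.super_offset),
               (unsigned)le32toh(sb.layout));
        return 0;
}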
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b8859cbf84b6..dde8ecb760c8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
-extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
-static inline int mddev_check_plugged(struct mddev *mddev)
-{
- return !!blk_check_plugged(md_unplug, mddev,
- sizeof(struct blk_plug_cb));
-}
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fbc2d7851b49..a34f58772022 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
static void freeze_array(struct r1conf *conf, int extra)
{
/* Stop sync I/O and normal I/O and wait for everything to
- * go quite.
+ * go quiet.
* This is called in two situations:
* 1) management command handlers (reshape, remove disk, quiesce).
* 2) one normal I/O request failed.
@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
- if (bio_data_dir(split) == READ)
+ if (bio_data_dir(split) == READ) {
raid1_read_request(mddev, split);
- else
+
+ /*
+ * If a bio is split, the first part of the bio will
+ * pass the barrier, but the bio as a whole stays queued
+ * on current->bio_list (see generic_make_request). If
+ * raise_barrier() is called here, the second part of the
+ * bio can't pass the barrier. But since the first part
+ * hasn't been dispatched to the underlying disks yet,
+ * the barrier is never released, so raise_barrier will
+ * wait forever. We have a deadlock.
+ * Note, this only happens in the read path. In the write
+ * path, the first part of the bio is dispatched from a
+ * schedule() call (because of the blk plug) or offloaded
+ * to raid1d.
+ * Quitting the function immediately changes the order of
+ * bios queued on bio_list and avoids the deadlock.
+ */
+ if (split != bio) {
+ generic_make_request(bio);
+ break;
+ }
+ } else
raid1_write_request(mddev, split);
} while (split != bio);
}
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, newsize);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
mddev->recovery_cp = mddev->dev_sectors;
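The fix's escape hatch is to stop iterating locally once a read bio has been split: the remainder is resubmitted through generic_make_request(), which changes its position in current->bio_list so the already-issued first fragment can reach the underlying disks before the barrier is raised again. A toy model of that control flow (ordinary C; the requeue is simulated with a print):

#include <stdio.h>

/* Toy model: a "bio" is a sector count; max forces splits. */
static void requeue(int remaining)
{
        /* stands in for generic_make_request(): the remainder goes back
         * on current->bio_list behind the fragment just issued */
        printf("requeue remaining %d sectors\n", remaining);
}

static void make_request(int sectors, int max, int is_read)
{
        do {
                int split = sectors > max ? max : sectors;

                printf("issue fragment of %d sectors\n", split);
                sectors -= split;

                if (is_read && sectors > 0) {
                        /* Read path: bail out instead of raising the
                         * barrier again while the first fragment is
                         * still parked on bio_list. */
                        requeue(sectors);
                        break;
                }
        } while (sectors > 0);
}

int main(void)
{
        make_request(10, 4, 1);   /* read: one fragment, then requeue */
        make_request(10, 4, 0);   /* write: loop over all fragments   */
        return 0;
}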
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 063c43d83b72..e89a8d78a9ed 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1]))),
conf->resync_lock);
conf->nr_waiting--;
if (!conf->nr_waiting)
@@ -1477,11 +1478,24 @@ retry_write:
mbio->bi_bdev = (void*)rdev;
atomic_inc(&r10_bio->remaining);
+
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
}
}
@@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
+ /*
+ * If a bio is split, the first part of the bio will pass
+ * the barrier, but the bio as a whole stays queued on
+ * current->bio_list (see generic_make_request). If
+ * raise_barrier() is called here, the second part of the
+ * bio can't pass the barrier. But since the first part
+ * hasn't been dispatched to the underlying disks yet, the
+ * barrier is never released, so raise_barrier will wait
+ * forever. We have a deadlock.
+ * Note, this only happens in the read path. In the write
+ * path, the first part of the bio is dispatched from a
+ * schedule() call (because of the blk plug) or offloaded
+ * to raid10d.
+ * Quitting the function immediately changes the order of
+ * bios queued on bio_list and avoids the deadlock.
+ */
__make_request(mddev, split);
+ if (split != bio && bio_data_dir(bio) == READ) {
+ generic_make_request(bio);
+ break;
+ }
} while (split != bio);
/* In case raid10d snuck in to freeze_array */
@@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, size);
- if (mddev->queue) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
- }
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > oldsize) {
mddev->recovery_cp = oldsize;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4fb09b3fcb41..ed5cd705b985 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
(test_bit(R5_Wantdrain, &dev->flags) ||
test_bit(R5_InJournal, &dev->flags))) ||
(srctype == SYNDROME_SRC_WRITTEN &&
- dev->written)) {
+ (dev->written ||
+ test_bit(R5_InJournal, &dev->flags)))) {
if (test_bit(R5_InJournal, &dev->flags))
srcs[slot] = sh->dev[i].orig_page;
else
@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, newsize);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 7a681d8202c7..4442e478db72 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
*
* The actual DAP implementation may be restricted to only one of the modes.
* A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the mode defined below.
-*
+* overrides or cannot handle the mode defined below.
*/
#ifndef DRXDAP_SINGLE_MASTER
#define DRXDAP_SINGLE_MASTER 1
@@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
*
* This maximum size may be restricted by the actual DAP implementation.
* A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
+* overrides or cannot handle the chunksize defined below.
*
* Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data
* buffer. Do not undefine or choose too large, unless your system is able to
@@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
*
* This maximum size may be restricted by the actual DAP implementation.
* A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
-*
+* overrides or cannot handle the chunksize defined below.
*/
#ifndef DRXDAP_MAX_RCHUNKSIZE
#define DRXDAP_MAX_RCHUNKSIZE 60
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index b4b583f7137a..b4c0f10fc3b0 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
/**
* vsp1_du_setup_lif - Setup the output part of the VSP pipeline
* @dev: the VSP device
- * @width: output frame width in pixels
- * @height: output frame height in pixels
+ * @cfg: the LIF configuration
*
- * Configure the output part of VSP DRM pipeline for the given frame @width and
- * @height. This sets up formats on the BRU source pad, the WPF0 sink and source
- * pads, and the LIF sink pad.
+ * Configure the output part of VSP DRM pipeline for the given frame @cfg.width
+ * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink
+ * and source pads, and the LIF sink pad.
*
* As the media bus code on the BRU source pad is conditioned by the
* configuration of the BRU sink 0 pad, we also set up the formats on all BRU
@@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
*
* Return 0 on success or a negative error code on failure.
*/
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
- unsigned int height)
+int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
@@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
unsigned int i;
int ret;
- dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
- __func__, width, height);
-
- if (width == 0 || height == 0) {
- /* Zero width or height means the CRTC is being disabled, stop
+ if (!cfg) {
+ /* NULL configuration means the CRTC is being disabled, stop
* the pipeline and turn the light off.
*/
ret = vsp1_pipeline_stop(pipe);
@@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
return 0;
}
+ dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
+ __func__, cfg->width, cfg->height);
+
/* Configure the format at the BRU sinks and propagate it through the
* pipeline.
*/
@@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
for (i = 0; i < bru->entity.source_pad; ++i) {
format.pad = i;
- format.format.width = width;
- format.format.height = height;
+ format.format.width = cfg->width;
+ format.format.height = cfg->height;
format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
format.format.field = V4L2_FIELD_NONE;
@@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
}
format.pad = bru->entity.source_pad;
- format.format.width = width;
- format.format.height = height;
+ format.format.width = cfg->width;
+ format.format.height = cfg->height;
format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
format.format.field = V4L2_FIELD_NONE;
@@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
/* Verify that the format at the output of the pipeline matches the
* requested frame size and media bus code.
*/
- if (format.format.width != width || format.format.height != height ||
+ if (format.format.width != cfg->width ||
+ format.format.height != cfg->height ||
format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
return -EPIPE;
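The new vsp1_du_setup_lif() signature replaces two scalars with a configuration struct, and uses a NULL config rather than a zero width/height sentinel to mean "disable". A minimal sketch of that API shape, with hypothetical names mirroring the new interface:

#include <stdio.h>

/* Hypothetical mirror of the new LIF configuration interface. */
struct lif_config {
        unsigned int width;
        unsigned int height;
};

static int setup_lif(const struct lif_config *cfg)
{
        if (!cfg) {
                /* NULL configuration: the CRTC is being disabled */
                puts("stopping pipeline");
                return 0;
        }
        printf("configuring LIF %ux%u\n", cfg->width, cfg->height);
        return 0;
}

int main(void)
{
        struct lif_config cfg = { .width = 1920, .height = 1080 };

        setup_lif(&cfg);   /* enable */
        setup_lif(NULL);   /* disable: no zero-size sentinel needed */
        return 0;
}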
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 393dccaabdd0..1688893a65bb 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
return -ERESTARTSYS;
ir = irctls[iminor(inode)];
+ mutex_unlock(&lirc_dev_lock);
+
if (!ir) {
retval = -ENODEV;
goto error;
@@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
}
error:
- mutex_unlock(&lirc_dev_lock);
-
nonseekable_open(inode, file);
return retval;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index b109f8246b96..ec4b25bd2ec2 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
{
u8 tolerance, config;
struct nvt_dev *nvt = dev->priv;
+ unsigned long flags;
int i;
/* hardcode the tolerance to 10% */
tolerance = DIV_ROUND_UP(count, 10);
- spin_lock(&nvt->lock);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt_clear_cir_wake_fifo(nvt);
nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
- spin_unlock(&nvt->lock);
+ spin_unlock_irqrestore(&nvt->lock, flags);
}
static ssize_t wakeup_data_show(struct device *dev,
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 2424946740e6..d84533699668 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev)
{
int rc;
struct rc_map *rc_map;
+ u64 rc_type;
if (!dev->map_name)
return -EINVAL;
@@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev)
if (rc)
return rc;
- if (dev->change_protocol) {
- u64 rc_type = (1ll << rc_map->rc_type);
+ rc_type = BIT_ULL(rc_map->rc_type);
+ if (dev->change_protocol) {
rc = dev->change_protocol(dev, &rc_type);
if (rc < 0)
goto out_table;
dev->enabled_protocols = rc_type;
}
+ if (dev->driver_type == RC_DRIVER_IR_RAW)
+ ir_raw_load_modules(&rc_type);
+
set_bit(EV_KEY, dev->input_dev->evbit);
set_bit(EV_REP, dev->input_dev->evbit);
set_bit(EV_MSC, dev->input_dev->evbit);
@@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev)
dev->input_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
- rc = rc_setup_rx_device(dev);
- if (rc)
- goto out_dev;
- }
-
if (dev->driver_type == RC_DRIVER_IR_RAW ||
dev->driver_type == RC_DRIVER_IR_RAW_TX) {
if (!raw_init) {
@@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev)
}
rc = ir_raw_event_register(dev);
if (rc < 0)
- goto out_rx;
+ goto out_dev;
+ }
+
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+ rc = rc_setup_rx_device(dev);
+ if (rc)
+ goto out_raw;
}
/* Allow the RC sysfs nodes to be accessible */
@@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev)
return 0;
-out_rx:
- rc_free_rx_device(dev);
+out_raw:
+ ir_raw_event_unregister(dev);
out_dev:
device_del(&dev->dev);
out_unlock:
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 923fb2299553..41b54e40176c 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg)
ir_raw_event_handle(serial_ir.rcdev);
}
+/* Needed by serial_ir_probe() */
+static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+ unsigned int count);
+static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle);
+static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier);
+static int serial_ir_open(struct rc_dev *rcdev);
+static void serial_ir_close(struct rc_dev *rcdev);
+
static int serial_ir_probe(struct platform_device *dev)
{
+ struct rc_dev *rcdev;
int i, nlow, nhigh, result;
+ rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
+
+ if (hardware[type].send_pulse && hardware[type].send_space)
+ rcdev->tx_ir = serial_ir_tx;
+ if (hardware[type].set_send_carrier)
+ rcdev->s_tx_carrier = serial_ir_tx_carrier;
+ if (hardware[type].set_duty_cycle)
+ rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
+
+ switch (type) {
+ case IR_HOMEBREW:
+ rcdev->input_name = "Serial IR type home-brew";
+ break;
+ case IR_IRDEO:
+ rcdev->input_name = "Serial IR type IRdeo";
+ break;
+ case IR_IRDEO_REMOTE:
+ rcdev->input_name = "Serial IR type IRdeo remote";
+ break;
+ case IR_ANIMAX:
+ rcdev->input_name = "Serial IR type AnimaX";
+ break;
+ case IR_IGOR:
+ rcdev->input_name = "Serial IR type IgorPlug";
+ break;
+ }
+
+ rcdev->input_phys = KBUILD_MODNAME "/input0";
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->open = serial_ir_open;
+ rcdev->close = serial_ir_close;
+ rcdev->dev.parent = &serial_ir.pdev->dev;
+ rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->min_timeout = 1;
+ rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
+ rcdev->rx_resolution = 250000;
+
+ serial_ir.rcdev = rcdev;
+
+ setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
+ (unsigned long)&serial_ir);
+
result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
share_irq ? IRQF_SHARED : 0,
KBUILD_MODNAME, &hardware);
@@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev)
return -EBUSY;
}
- setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
- (unsigned long)&serial_ir);
-
result = hardware_init_port();
if (result < 0)
return result;
@@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev)
sense ? "low" : "high");
dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
- return 0;
+
+ return devm_rc_register_device(&dev->dev, rcdev);
}
static int serial_ir_open(struct rc_dev *rcdev)
@@ -723,7 +780,6 @@ static void serial_ir_exit(void)
static int __init serial_ir_init_module(void)
{
- struct rc_dev *rcdev;
int result;
switch (type) {
@@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void)
sense = !!sense;
result = serial_ir_init();
- if (result)
- return result;
-
- rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW);
- if (!rcdev) {
- result = -ENOMEM;
- goto serial_cleanup;
- }
-
- if (hardware[type].send_pulse && hardware[type].send_space)
- rcdev->tx_ir = serial_ir_tx;
- if (hardware[type].set_send_carrier)
- rcdev->s_tx_carrier = serial_ir_tx_carrier;
- if (hardware[type].set_duty_cycle)
- rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
-
- switch (type) {
- case IR_HOMEBREW:
- rcdev->input_name = "Serial IR type home-brew";
- break;
- case IR_IRDEO:
- rcdev->input_name = "Serial IR type IRdeo";
- break;
- case IR_IRDEO_REMOTE:
- rcdev->input_name = "Serial IR type IRdeo remote";
- break;
- case IR_ANIMAX:
- rcdev->input_name = "Serial IR type AnimaX";
- break;
- case IR_IGOR:
- rcdev->input_name = "Serial IR type IgorPlug";
- break;
- }
-
- rcdev->input_phys = KBUILD_MODNAME "/input0";
- rcdev->input_id.bustype = BUS_HOST;
- rcdev->input_id.vendor = 0x0001;
- rcdev->input_id.product = 0x0001;
- rcdev->input_id.version = 0x0100;
- rcdev->open = serial_ir_open;
- rcdev->close = serial_ir_close;
- rcdev->dev.parent = &serial_ir.pdev->dev;
- rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
- rcdev->driver_name = KBUILD_MODNAME;
- rcdev->map_name = RC_MAP_RC6_MCE;
- rcdev->min_timeout = 1;
- rcdev->timeout = IR_DEFAULT_TIMEOUT;
- rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
- rcdev->rx_resolution = 250000;
-
- serial_ir.rcdev = rcdev;
-
- result = rc_register_device(rcdev);
-
if (!result)
return 0;
-serial_cleanup:
+
serial_ir_exit();
return result;
}
@@ -818,7 +820,6 @@ serial_cleanup:
static void __exit serial_ir_exit_module(void)
{
del_timer_sync(&serial_ir.timeout_timer);
- rc_unregister_device(serial_ir.rcdev);
serial_ir_exit();
}
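
The probe() rework above leans on device-managed rc-core helpers, so the unwind on probe failure and on driver removal happens automatically and the explicit rc_unregister_device() in the exit path can go away. A minimal sketch of that pattern, assuming a platform device and a raw-IR driver (names below are illustrative, not from the patch):

static int example_probe(struct platform_device *pdev)
{
	struct rc_dev *rcdev;

	/* freed automatically when the underlying device goes away */
	rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!rcdev)
		return -ENOMEM;

	rcdev->driver_name = KBUILD_MODNAME;
	rcdev->map_name = RC_MAP_RC6_MCE;

	/* also unregistered automatically - no cleanup in remove() */
	return devm_rc_register_device(&pdev->dev, rcdev);
}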
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 6ca502d834b4..4f42d57f81d9 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -68,6 +68,7 @@
struct dw2102_state {
u8 initialized;
u8 last_lock;
+ u8 data[MAX_XFER_SIZE + 4];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
@@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- u8 obuf[0x40], ibuf[0x40];
+ struct dw2102_state *state;
if (!d)
return -ENODEV;
+
+ state = d->priv;
+
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
+ if (mutex_lock_interruptible(&d->data_mutex) < 0) {
+ mutex_unlock(&d->i2c_mutex);
+ return -EAGAIN;
+ }
switch (num) {
case 1:
switch (msg[0].addr) {
case SU3000_STREAM_CTRL:
- obuf[0] = msg[0].buf[0] + 0x36;
- obuf[1] = 3;
- obuf[2] = 0;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
+ state->data[0] = msg[0].buf[0] + 0x36;
+ state->data[1] = 3;
+ state->data[2] = 0;
+ if (dvb_usb_generic_rw(d, state->data, 3,
+ state->data, 0, 0) < 0)
err("i2c transfer failed.");
break;
case DW2102_RC_QUERY:
- obuf[0] = 0x10;
- if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
+ state->data[0] = 0x10;
+ if (dvb_usb_generic_rw(d, state->data, 1,
+ state->data, 2, 0) < 0)
err("i2c transfer failed.");
- msg[0].buf[1] = ibuf[0];
- msg[0].buf[0] = ibuf[1];
+ msg[0].buf[1] = state->data[0];
+ msg[0].buf[0] = state->data[1];
break;
default:
/* always i2c write */
- obuf[0] = 0x08;
- obuf[1] = msg[0].addr;
- obuf[2] = msg[0].len;
+ state->data[0] = 0x08;
+ state->data[1] = msg[0].addr;
+ state->data[2] = msg[0].len;
- memcpy(&obuf[3], msg[0].buf, msg[0].len);
+ memcpy(&state->data[3], msg[0].buf, msg[0].len);
- if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
- ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
+ state->data, 1, 0) < 0)
err("i2c transfer failed.");
}
break;
case 2:
/* always i2c read */
- obuf[0] = 0x09;
- obuf[1] = msg[0].len;
- obuf[2] = msg[1].len;
- obuf[3] = msg[0].addr;
- memcpy(&obuf[4], msg[0].buf, msg[0].len);
-
- if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
- ibuf, msg[1].len + 1, 0) < 0)
+ state->data[0] = 0x09;
+ state->data[1] = msg[0].len;
+ state->data[2] = msg[1].len;
+ state->data[3] = msg[0].addr;
+ memcpy(&state->data[4], msg[0].buf, msg[0].len);
+
+ if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+ state->data, msg[1].len + 1, 0) < 0)
err("i2c transfer failed.");
- memcpy(msg[1].buf, &ibuf[1], msg[1].len);
+ memcpy(msg[1].buf, &state->data[1], msg[1].len);
break;
default:
warn("more than 2 i2c messages at a time is not handled yet.");
break;
}
+ mutex_unlock(&d->data_mutex);
mutex_unlock(&d->i2c_mutex);
return num;
}
@@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
{
struct dw2102_state *state = (struct dw2102_state *)d->priv;
- u8 obuf[] = {0xde, 0};
+ int ret = 0;
info("%s: %d, initialized %d", __func__, i, state->initialized);
if (i && !state->initialized) {
+ mutex_lock(&d->data_mutex);
+
+ state->data[0] = 0xde;
+ state->data[1] = 0;
+
state->initialized = 1;
/* reset board */
- return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
+ mutex_unlock(&d->data_mutex);
}
- return 0;
+ return ret;
}
static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
@@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
return 0;
}
-static int su3000_frontend_attach(struct dvb_usb_adapter *d)
+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
{
- u8 obuf[3] = { 0xe, 0x80, 0 };
- u8 ibuf[] = { 0 };
+ struct dvb_usb_device *d = adap->dev;
+ struct dw2102_state *state = d->priv;
+
+ mutex_lock(&d->data_mutex);
+
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x02;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x02;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(300);
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 0;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0x51;
+ state->data[0] = 0x51;
- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
- d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe == NULL)
+ mutex_unlock(&d->data_mutex);
+
+ adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
+ &d->i2c_adap);
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
- if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
&dw2104_ts2020_config,
- &d->dev->i2c_adap)) {
+ &d->i2c_adap)) {
info("Attached DS3000/TS2020!");
return 0;
}
@@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
-static int t220_frontend_attach(struct dvb_usb_adapter *d)
+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
{
- u8 obuf[3] = { 0xe, 0x87, 0 };
- u8 ibuf[] = { 0 };
+ struct dvb_usb_device *d = adap->dev;
+ struct dw2102_state *state = d->priv;
+
+ mutex_lock(&d->data_mutex);
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ state->data[0] = 0xe;
+ state->data[1] = 0x87;
+ state->data[2] = 0x0;
+
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x86;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x86;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x80;
- obuf[2] = 0;
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(50);
- obuf[0] = 0xe;
- obuf[1] = 0x80;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0x51;
+ state->data[0] = 0x51;
- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
- d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
- &d->dev->i2c_adap, NULL);
- if (d->fe_adap[0].fe != NULL) {
- if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
- &d->dev->i2c_adap, &tda18271_config)) {
+ mutex_unlock(&d->data_mutex);
+
+ adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+ &d->i2c_adap, NULL);
+ if (adap->fe_adap[0].fe != NULL) {
+ if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
+ &d->i2c_adap, &tda18271_config)) {
info("Attached TDA18271HD/CXD2820R!");
return 0;
}
@@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
-static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
{
- u8 obuf[] = { 0x51 };
- u8 ibuf[] = { 0 };
+ struct dvb_usb_device *d = adap->dev;
+ struct dw2102_state *state = d->priv;
+
+ mutex_lock(&d->data_mutex);
- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ state->data[0] = 0x51;
+
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
- d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
- &d->dev->i2c_adap);
+ mutex_unlock(&d->data_mutex);
- if (d->fe_adap[0].fe == NULL)
+ adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
+ &s421_m88rs2000_config,
+ &d->i2c_adap);
+
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
- if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
&dw2104_ts2020_config,
- &d->dev->i2c_adap)) {
+ &d->i2c_adap)) {
info("Attached RS2000/TS2020!");
return 0;
}
@@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap->dev;
struct dw2102_state *state = d->priv;
- u8 obuf[3] = { 0xe, 0x80, 0 };
- u8 ibuf[] = { 0 };
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
struct i2c_board_info board_info;
struct m88ds3103_platform_data m88ds3103_pdata = {};
struct ts2020_config ts2020_config = {};
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ mutex_lock(&d->data_mutex);
+
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 0x0;
+
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x02;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x02;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(300);
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 0;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0x51;
+ state->data[0] = 0x51;
- if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
+ mutex_unlock(&d->data_mutex);
+
/* attach demod */
m88ds3103_pdata.clk = 27000000;
m88ds3103_pdata.i2c_wr_max = 33;
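
The buffer changes in this file address a common dvb-usb pitfall: transfer buffers handed to the USB core must be DMA-capable, which on-stack arrays like the old obuf[]/ibuf[] are not, so the driver now reuses a buffer embedded in its device state and serializes its users with d->data_mutex. A sketch of the resulting shape, with a hypothetical command (the 0x0e/0x80 bytes are illustrative):

static int example_cmd(struct dvb_usb_device *d)
{
	struct dw2102_state *state = d->priv;
	int ret;

	if (mutex_lock_interruptible(&d->data_mutex))
		return -EAGAIN;

	state->data[0] = 0x0e;	/* hypothetical opcode */
	state->data[1] = 0x80;
	state->data[2] = 0;

	/* state->data lives on the heap, so it is safe for USB DMA */
	ret = dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0);

	mutex_unlock(&d->data_mutex);
	return ret;
}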
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 6fb773dbcd0c..93be82fc338a 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -219,15 +219,20 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
int write, unsigned long *paddr, int *pageshift)
{
pgd_t *pgdp;
- pmd_t *pmdp;
+ p4d_t *p4dp;
pud_t *pudp;
+ pmd_t *pmdp;
pte_t pte;
pgdp = pgd_offset(vma->vm_mm, vaddr);
if (unlikely(pgd_none(*pgdp)))
goto err;
- pudp = pud_offset(pgdp, vaddr);
+ p4dp = p4d_offset(pgdp, vaddr);
+ if (unlikely(p4d_none(*p4dp)))
+ goto err;
+
+ pudp = pud_offset(p4dp, vaddr);
if (unlikely(pud_none(*pudp)))
goto err;
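
The grufault change is part of the 5-level page-table conversion: a p4d level now sits between pgd and pud, so every software walker descends pgd -> p4d -> pud -> pmd -> pte, checking each level for absence, and pud_offset() takes a p4d pointer instead of a pgd pointer. A minimal sketch of such a walk (locking and huge-page handling omitted):

static pte_t *example_walk(struct mm_struct *mm, unsigned long vaddr)
{
	pgd_t *pgdp = pgd_offset(mm, vaddr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	if (pgd_none(*pgdp))
		return NULL;
	p4dp = p4d_offset(pgdp, vaddr);	/* the new intermediate level */
	if (p4d_none(*p4dp))
		return NULL;
	pudp = pud_offset(p4dp, vaddr);	/* now takes a p4d, not a pgd */
	if (pud_none(*pudp))
		return NULL;
	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp))
		return NULL;
	return pte_offset_map(pmdp, vaddr);
}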
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 1ae872bfc3ba..747645c74134 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor)
}
/*
- * Send write disble instruction to the chip.
+ * Send write disable instruction to the chip.
*/
static inline int write_disable(struct spi_nor *nor)
{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 248f60d171a5..ffea9859f5a7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2272,10 +2272,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
processed = xgbe_rx_poll(channel, budget);
/* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
/* Enable Tx and Rx interrupts */
if (pdata->channel_irq_mode)
xgbe_enable_rx_tx_int(pdata, channel);
@@ -2317,10 +2314,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
} while ((processed < budget) && (processed != last_processed));
/* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
/* Enable Tx and Rx interrupts */
xgbe_enable_rx_tx_ints(pdata);
}
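
Both xgbe hunks rely on napi_complete_done() returning a boolean: it reports false when NAPI ownership was not actually released (for instance while busy polling is in progress), in which case interrupts must stay off. A sketch of the idiom, with hypothetical helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = example_rx_clean(napi, budget);	/* hypothetical */

	/* Re-arm interrupts only if NAPI really completed; the return
	 * value is false while busy polling still owns this context.
	 */
	if (work < budget && napi_complete_done(napi, work))
		example_enable_irqs(napi);		/* hypothetical */

	return work;
}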
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 581de71a958a..4c6c882c6a1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
if (!((1U << i) & self->msix_entry_mask))
continue;
- free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
if (pdev->msix_enabled)
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
self->msix_entry_mask &= ~(1U << i);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d8d06fdfc42b..ac76fc251d26 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
- /* VF with OLD Hypervisor or old PF do not support filtering */
if (IS_PF(bp)) {
if (chip_is_e1x)
bp->accept_any_vlan = true;
else
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#ifdef CONFIG_BNX2X_SRIOV
- } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#endif
}
+ /* For VF we'll know whether to enable VLAN filtering after
+ * getting a response to CHANNEL_TLV_ACQUIRE from PF.
+ */
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
@@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
if (!netif_running(bp->dev)) {
DP(BNX2X_MSG_PTP,
"PTP adjfreq called while the interface is down\n");
- return -EFAULT;
+ return -ENETDOWN;
}
if (ppb < 0) {
@@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP adjtime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
timecounter_adjtime(&bp->timecounter, delta);
@@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP gettime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
ns = timecounter_read(&bp->timecounter);
DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP settime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
ns = timespec64_to_ns(ts);
DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ /* VFs with an old hypervisor or an old PF do not support filtering */
+ if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+#endif
}
/* Enable SRIOV if capability found in configuration space */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fad22adbbb9..bdfd53b46bc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
/* Add/Remove the filter */
rc = bnx2x_config_vlan_mac(bp, &ramrod);
- if (rc && rc != -EEXIST) {
+ if (rc == -EEXIST)
+ return 0;
+ if (rc) {
BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete",
(filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
return rc;
}
+ filter->applied = true;
+
return 0;
}
@@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* Rollback if needed */
if (i != filters->count) {
BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
- i, filters->count + 1);
+ i, filters->count);
while (--i >= 0) {
+ if (!filters->filters[i].applied)
+ continue;
filters->filters[i].add = !filters->filters[i].add;
bnx2x_vf_mac_vlan_config(bp, vf, qid,
&filters->filters[i],
@@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
continue;
}
- DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "add addresses for vf %d\n", vf->abs_vfid);
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
@@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
cpu_to_le32(U64_LO(q_stats_addr));
- DP(BNX2X_MSG_IOV,
- "added address %x %x for vf %d queue %d client %d\n",
- cur_query_entry->address.hi,
- cur_query_entry->address.lo, cur_query_entry->funcID,
- j, cur_query_entry->index);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo,
+ cur_query_entry->funcID,
+ j, cur_query_entry->index);
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 7a6d406f4c11..888d0b6632e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
bool add;
+ bool applied;
u8 *mac;
u16 vid;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index bfae300cf25f..76a4668c50fe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc, i = 0;
+ int rc = 0, i = 0;
struct netdev_hw_addr *ha;
if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
/* Get Rx mode requested */
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+ /* We support at most PFVF_MAX_MULTICAST_PER_VF mcast addresses */
+ if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports not more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ rc = -EINVAL;
+ goto out;
+ }
+
netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
i++;
}
- /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
- * addresses tops
- */
- if (i >= PFVF_MAX_MULTICAST_PER_VF) {
- DP(NETIF_MSG_IFUP,
- "VF supports not more than %d multicast MAC addresses\n",
- PFVF_MAX_MULTICAST_PER_VF);
- return -EINVAL;
- }
-
req->n_multicast = i;
req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
req->vf_qid = 0;
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* request pf to add a vlan for the vf */
@@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
}
+ /* build vlan list */
+ fl = NULL;
+
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+
}
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
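
The set_mcast rework validates the multicast list length with netdev_mc_count() before building the request, instead of noticing the overflow after the copy loop, and routes the error through the common out: label so the mailbox is always finalized. The counting-and-copy part, as a standalone sketch (max and the destination array are hypothetical):

static int example_copy_mc_list(struct net_device *dev,
				u8 (*mc)[ETH_ALEN], int max)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	/* reject oversized lists before touching the request buffer */
	if (netdev_mc_count(dev) > max)
		return -EINVAL;

	netdev_for_each_mc_addr(ha, dev)
		ether_addr_copy(mc[i++], ha->addr);

	return i;	/* number of addresses actually copied */
}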
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 235733e91c79..32de4589d16a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4465,6 +4465,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
+ if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
+ FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
+ bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
@@ -5507,8 +5511,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
- link_info->support_auto_speeds =
- le16_to_cpu(resp->supported_speeds_auto_mode);
+ if (resp->supported_speeds_auto_mode)
+ link_info->support_auto_speeds =
+ le16_to_cpu(resp->supported_speeds_auto_mode);
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -6495,8 +6500,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
+ int rc;
+
+ if (!silent)
+ bnxt_ulp_stop(bp);
bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ if (!silent && !rc)
+ bnxt_ulp_start(bp);
}
}
@@ -7444,6 +7455,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ goto init_err_pci_clean;
+
bnxt_hwrm_fw_set_time(bp);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -7554,10 +7569,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc)
- goto init_err_pci_clean;
-
rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index faf26a2f726b..c7a5b84a5cb2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -993,6 +993,7 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
+ #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fdf2d8caf7bf..03532061d211 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
return;
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
else
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f92896835d2a..69015fa50f20 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
+static int bcmgenet_begin(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn on the clock */
+ return clk_prepare_enable(priv->clk);
+}
+
+static void bcmgenet_complete(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn off the clock */
+ clk_disable_unprepare(priv->clk);
+}
+
static int bcmgenet_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
/* Misc UniMAC counters */
STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
- UMAC_RBUF_OVFL_CNT),
- STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ UMAC_RBUF_OVFL_CNT_V1),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+ UMAC_RBUF_ERR_CNT_V1),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
}
}
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+ u16 new_offset;
+ u32 val;
+
+ switch (offset) {
+ case UMAC_RBUF_OVFL_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_OVFL_CNT_V2;
+ else
+ new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ case UMAC_RBUF_ERR_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_ERR_CNT_V2;
+ else
+ new_offset = RBUF_ERR_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ default:
+ val = bcmgenet_umac_readl(priv, offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, offset);
+ break;
+ }
+
+ return val;
+}
+
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
int i, j = 0;
@@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
case BCMGENET_STAT_NETDEV:
case BCMGENET_STAT_SOFT:
continue;
- case BCMGENET_STAT_MIB_RX:
- case BCMGENET_STAT_MIB_TX:
case BCMGENET_STAT_RUNT:
- if (s->type != BCMGENET_STAT_MIB_RX)
- offset = BCMGENET_STAT_OFFSET;
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_TX:
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_RX:
val = bcmgenet_umac_readl(priv,
UMAC_MIB_START + j + offset);
+ offset = 0; /* Reset Offset */
break;
case BCMGENET_STAT_MISC:
- val = bcmgenet_umac_readl(priv, s->reg_offset);
- /* clear if overflowed */
- if (val == ~0)
- bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ if (GENET_IS_V1(priv)) {
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0,
+ s->reg_offset);
+ } else {
+ val = bcmgenet_update_stat_misc(priv,
+ s->reg_offset);
+ }
break;
}
@@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
+ .begin = bcmgenet_begin,
+ .complete = bcmgenet_complete,
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
@@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
- struct netdev_queue *txq;
unsigned int pkts_compl = 0;
unsigned int bytes_compl = 0;
unsigned int c_index;
@@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_packets += pkts_compl;
dev->stats.tx_bytes += bytes_compl;
- txq = netdev_get_tx_queue(dev, ring->queue);
- netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
-
- if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
- }
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ pkts_compl, bytes_compl);
return pkts_compl;
}
@@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
struct bcmgenet_tx_ring *ring =
container_of(napi, struct bcmgenet_tx_ring, napi);
unsigned int work_done = 0;
+ struct netdev_queue *txq;
+ unsigned long flags;
- work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ spin_lock_irqsave(&ring->lock, flags);
+ work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ netif_tx_wake_queue(txq);
+ }
+ spin_unlock_irqrestore(&ring->lock, flags);
if (work_done == 0) {
napi_complete(napi);
@@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
+ unsigned long flags;
+ unsigned int status;
struct bcmgenet_priv *priv = container_of(
work, struct bcmgenet_priv, bcmgenet_irq_work);
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
- if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
- priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->irq0_stat;
+ priv->irq0_stat = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (status & UMAC_IRQ_MPD_R) {
netif_dbg(priv, wol, priv->dev,
"magic packet detected, waking up\n");
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
}
/* Link UP/DOWN event */
- if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->phydev,
- !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
- priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
- }
+ !!(status & UMAC_IRQ_LINK_UP));
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
- unsigned int index;
+ unsigned int index, status;
- /* Save irq status for bottom-half processing. */
- priv->irq1_stat =
- bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ "%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
for (index = 0; index < priv->hw_params->rx_queues; index++) {
- if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+ if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
rx_ring = &priv->rx_rings[index];
@@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
/* Check Tx priority queue interrupts */
for (index = 0; index < priv->hw_params->tx_queues; index++) {
- if (!(priv->irq1_stat & BIT(index)))
+ if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
@@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
+ unsigned int status;
+ unsigned long flags;
- /* Save irq status for bottom-half processing. */
- priv->irq0_stat =
- bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "IRQ=0x%x\n", priv->irq0_stat);
+ "IRQ=0x%x\n", status);
- if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+ if (status & UMAC_IRQ_RXDMA_DONE) {
rx_ring = &priv->rx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+ if (status & UMAC_IRQ_TXDMA_DONE) {
tx_ring = &priv->tx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2561,22 +2634,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
- UMAC_IRQ_PHY_DET_F |
- UMAC_IRQ_LINK_EVENT |
- UMAC_IRQ_HFB_SM |
- UMAC_IRQ_HFB_MM |
- UMAC_IRQ_MPD_R)) {
- /* all other interested interrupts handled in bottom half */
- schedule_work(&priv->bcmgenet_irq_work);
- }
-
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
- priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
}
+ /* all other interested interrupts handled in bottom half */
+ status &= (UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_MPD_R);
+ if (status) {
+ /* Save irq status for bottom-half processing. */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->irq0_stat |= status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
return IRQ_HANDLED;
}
@@ -2801,6 +2875,8 @@ err_irq0:
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
@@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ /* These values are reserved and require special treatment */
+ if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+ return;
+ }
+
/* This is the good old scheme, just GPHY major, no minor nor patch */
if ((gphy_rev & 0xf0) != 0)
priv->gphy_rev = gphy_rev << 8;
@@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
else if ((gphy_rev & 0xff00) != 0)
priv->gphy_rev = gphy_rev;
- /* This is reserved so should require special treatment */
- else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
- pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
- return;
- }
-
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
pr_warn("GENET does not support 40-bits PA\n");
@@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
const void *macaddr;
struct resource *r;
int err = -EIO;
+ const char *phy_mode_str;
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->lock);
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
ether_addr_copy(dev->dev_addr, macaddr);
@@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk_eee = NULL;
}
+ /* If this is an internal GPHY, power it on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+ !strcasecmp(phy_mode_str, "internal"))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
err = reset_umac(priv);
if (err)
goto err_clk_disable;
@@ -3502,6 +3588,8 @@ static int bcmgenet_resume(struct device *d)
return 0;
out_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
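
The interrupt rework in this file stops using priv->irq0_stat as an unlocked mailbox between hard-IRQ and workqueue context: the handler keeps the status in a local, services what it can directly, and only the bits destined for the bottom half are accumulated under the new priv->lock; the work item then snapshots and clears them under the same lock. A condensed sketch of the two sides (the struct and helpers are hypothetical):

struct example_priv {
	spinlock_t lock;
	unsigned int irq_stat;		/* bits deferred to the work item */
	struct work_struct irq_work;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;
	unsigned int status = example_read_and_clear_irq(priv);	/* hypothetical */
	unsigned long flags;

	status &= EXAMPLE_DEFERRED_MASK;	/* hypothetical mask */
	if (status) {
		spin_lock_irqsave(&priv->lock, flags);
		priv->irq_stat |= status;	/* accumulate, never overwrite */
		spin_unlock_irqrestore(&priv->lock, flags);
		schedule_work(&priv->irq_work);
	}
	return IRQ_HANDLED;
}

static void example_irq_task(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, irq_work);
	unsigned int status;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	status = priv->irq_stat;	/* snapshot ... */
	priv->irq_stat = 0;		/* ... and reset the mailbox */
	spin_unlock_irqrestore(&priv->lock, flags);

	example_handle_deferred(priv, status);	/* hypothetical */
}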
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34d331a..db7f289d65ae 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
#define MDIO_REG_SHIFT 16
#define MDIO_REG_MASK 0x1F
-#define UMAC_RBUF_OVFL_CNT 0x61C
+#define UMAC_RBUF_OVFL_CNT_V1 0x61C
+#define RBUF_OVFL_CNT_V2 0x80
+#define RBUF_OVFL_CNT_V3PLUS 0x94
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MPD_PW_MS 0x624
#define UMAC_MPD_PW_LS 0x628
-#define UMAC_RBUF_ERR_CNT 0x634
+#define UMAC_RBUF_ERR_CNT_V1 0x634
+#define RBUF_ERR_CNT_V2 0x84
+#define RBUF_ERR_CNT_V3PLUS 0x98
#define UMAC_MDF_ERR_CNT 0x638
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
@@ -619,11 +623,13 @@ struct bcmgenet_priv {
struct work_struct bcmgenet_irq_work;
int irq0;
int irq1;
- unsigned int irq0_stat;
- unsigned int irq1_stat;
int wol_irq;
bool wol_irq_disabled;
+ /* shared status */
+ spinlock_t lock;
+ unsigned int irq0_stat;
+
/* HW descriptors/checksum variables */
bool desc_64b_en;
bool desc_rxchk_en;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index be9c0e3f5ade..92f46b1375c3 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -152,7 +152,7 @@ struct octnic_gather {
*/
struct octeon_sg_entry *sg;
- u64 sg_dma_ptr;
+ dma_addr_t sg_dma_ptr;
};
struct handshake {
@@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio)
struct octnic_gather *g;
int i;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
if (!lio->glist)
return;
@@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio)
do {
g = (struct octnic_gather *)
list_delete_head(&lio->glist[i]);
- if (g) {
- if (g->sg) {
- dma_unmap_single(&lio->oct_dev->
- pci_dev->dev,
- g->sg_dma_ptr,
- g->sg_size,
- DMA_TO_DEVICE);
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- }
+ if (g)
kfree(g);
- }
} while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
}
- kfree((void *)lio->glist);
- kfree((void *)lio->glist_lock);
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
+ kfree(lio->glist);
+ lio->glist = NULL;
}
/**
@@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
GFP_KERNEL);
if (!lio->glist_lock)
- return 1;
+ return -ENOMEM;
lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
GFP_KERNEL);
if (!lio->glist) {
- kfree((void *)lio->glist_lock);
- return 1;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store the virtual and DMA base addresses of
+ * the per-glist coherent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ delete_glists(lio);
+ return -ENOMEM;
}
for (i = 0; i < num_iqs; i++) {
@@ -788,6 +811,16 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
INIT_LIST_HEAD(&lio->glist[i]);
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(oct,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ delete_glists(lio);
+ return -ENOMEM;
+ }
+
for (j = 0; j < lio->tx_qsize; j++) {
g = kzalloc_node(sizeof(*g), GFP_KERNEL,
numa_node);
@@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
if (!g)
break;
- g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
- OCT_SG_ENTRY_SIZE);
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
- g->sg = kmalloc_node(g->sg_size + 8,
- GFP_KERNEL, numa_node);
- if (!g->sg)
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
- }
-
- /* The gather component should be aligned on 64-bit
- * boundary
- */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
- }
- g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev,
- g->sg_dma_ptr)) {
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- kfree(g);
- break;
- }
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
list_add_tail(&g->list, &lio->glist[i]);
}
if (j != lio->tx_qsize) {
delete_glists(lio);
- return 1;
+ return -ENOMEM;
}
}
@@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
i++;
}
- dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
- g->sg_size, DMA_TO_DEVICE);
dptr = g->sg_dma_ptr;
if (OCTEON_CN23XX_PF(oct))
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 9d5e03502c76..7b83be4ce1fe 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -108,6 +108,8 @@ struct octnic_gather {
* received from the IP layer.
*/
struct octeon_sg_entry *sg;
+
+ dma_addr_t sg_dma_ptr;
};
struct octeon_device_priv {
@@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio)
struct octnic_gather *g;
int i;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
if (!lio->glist)
return;
@@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio)
do {
g = (struct octnic_gather *)
list_delete_head(&lio->glist[i]);
- if (g) {
- if (g->sg)
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
+ if (g)
kfree(g);
- }
} while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
}
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
kfree(lio->glist);
- kfree(lio->glist_lock);
+ lio->glist = NULL;
}
/**
@@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
lio->glist_lock =
kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
if (!lio->glist_lock)
- return 1;
+ return -ENOMEM;
lio->glist =
kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
if (!lio->glist) {
kfree(lio->glist_lock);
- return 1;
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store the virtual and DMA base addresses of
+ * the per-glist coherent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ delete_glists(lio);
+ return -ENOMEM;
}
for (i = 0; i < num_iqs; i++) {
@@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs)
INIT_LIST_HEAD(&lio->glist[i]);
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ delete_glists(lio);
+ return -ENOMEM;
+ }
+
for (j = 0; j < lio->tx_qsize; j++) {
g = kzalloc(sizeof(*g), GFP_KERNEL);
if (!g)
break;
- g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
- OCT_SG_ENTRY_SIZE);
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
- }
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
- /* The gather component should be aligned on 64-bit
- * boundary
- */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
- }
list_add_tail(&g->list, &lio->glist[i]);
}
if (j != lio->tx_qsize) {
delete_glists(lio);
- return 1;
+ return -ENOMEM;
}
}
@@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
i++;
}
- dptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
- dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
- __func__);
- dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
- skb->len - skb->data_len,
- DMA_TO_DEVICE);
- for (j = 1; j <= frags; j++) {
- frag = &skb_shinfo(skb)->frags[j - 1];
- dma_unmap_page(&oct->pci_dev->dev,
- g->sg[j >> 2].ptr[j & 3],
- frag->size, DMA_TO_DEVICE);
- }
- return NETDEV_TX_BUSY;
- }
+ dptr = g->sg_dma_ptr;
ndata.cmd.cmd3.dptr = dptr;
finfo->dptr = dptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3dc2e9651a8..d29ebc531151 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -71,17 +71,17 @@
#define CN23XX_MAX_RINGS_PER_VF 8
#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_MAX_IQ_DESCRIPTORS 512
#define CN23XX_DB_MIN 1
#define CN23XX_DB_MAX 8
#define CN23XX_DB_TIMEOUT 1
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_MAX_OQ_DESCRIPTORS 512
#define CN23XX_OQ_BUF_SIZE 1536
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
-#define CN23XX_OQ_REFIL_THRESHOLD 128
+#define CN23XX_OQ_REFIL_THRESHOLD 16
#define CN23XX_OQ_INTR_PKT 64
#define CN23XX_OQ_INTR_TIME 100
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0be87d119a97..79f809479af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
recv_buffer_destroy(droq->recv_buf_list[i].buffer,
pg_info);
- if (droq->desc_ring && droq->desc_ring[i].info_ptr)
- lio_unmap_ring_info(oct->pci_dev,
- (u64)droq->
- desc_ring[i].info_ptr,
- OCT_DROQ_INFO_SIZE);
droq->recv_buf_list[i].buffer = NULL;
}
@@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
vfree(droq->recv_buf_list);
if (droq->info_base_addr)
- cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
- droq->info_alloc_size,
- droq->info_base_addr,
- droq->info_list_dma);
+ lio_free_info_buffer(oct, droq);
if (droq->desc_ring)
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->info_list =
- cnnic_numa_alloc_aligned_dma((droq->max_count *
- OCT_DROQ_INFO_SIZE),
- &droq->info_alloc_size,
- &droq->info_base_addr,
- numa_node);
+ droq->info_list = lio_alloc_info_buffer(oct, droq);
if (!droq->info_list) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index e62074090681..6982c0af5ecc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -325,10 +325,10 @@ struct octeon_droq {
size_t desc_ring_dma;
/** Info ptr list are allocated at this virtual address. */
- size_t info_base_addr;
+ void *info_base_addr;
/** DMA mapped address of the info list */
- size_t info_list_dma;
+ dma_addr_t info_list_dma;
/** Allocated size of info list. */
u32 info_alloc_size;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index aa36e9ae7676..bed9ef17bc26 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -140,48 +140,6 @@ err_release_region:
return 1;
}
-static inline void *
-cnnic_numa_alloc_aligned_dma(u32 size,
- u32 *alloc_size,
- size_t *orig_ptr,
- int numa_node)
-{
- int retries = 0;
- void *ptr = NULL;
-
-#define OCTEON_MAX_ALLOC_RETRIES 1
- do {
- struct page *page = NULL;
-
- page = alloc_pages_node(numa_node,
- GFP_KERNEL,
- get_order(size));
- if (!page)
- page = alloc_pages(GFP_KERNEL,
- get_order(size));
- ptr = (void *)page_address(page);
- if ((unsigned long)ptr & 0x07) {
- __free_pages(page, get_order(size));
- ptr = NULL;
- /* Increment the size required if the first
- * attempt failed.
- */
- if (!retries)
- size += 7;
- }
- retries++;
- } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
-
- *alloc_size = size;
- *orig_ptr = (unsigned long)ptr;
- if ((unsigned long)ptr & 0x07)
- ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
- return ptr;
-}
-
-#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
- free_pages(orig_ptr, get_order(size))
-
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 6bb89419006e..eef2a1e8a7e3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -62,6 +62,9 @@ struct lio {
/** Array of gather component linked lists */
struct list_head *glist;
+ void **glists_virt_base;
+ dma_addr_t *glists_dma_base;
+ u32 glist_entry_size;
/** Pointer to the NIC properties for the Octeon device this network
* interface is associated with.
@@ -344,6 +347,29 @@ static inline void tx_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
+static inline void *
+lio_alloc_info_buffer(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ void *virt_ptr;
+
+ virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
+ &droq->info_list_dma);
+ if (virt_ptr) {
+ droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
+ droq->info_base_addr = virt_ptr;
+ }
+
+ return virt_ptr;
+}
+
+static inline void lio_free_info_buffer(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
+ droq->info_list_dma);
+}
+
static inline
void *get_rbd(struct sk_buff *skb)
{
@@ -359,22 +385,7 @@ void *get_rbd(struct sk_buff *skb)
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
- dma_addr_t dma_addr;
- struct octeon_device *oct = droq->oct_dev;
-
- dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
- OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
-
- WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
-
- return (u64)dma_addr;
-}
-
-static inline void
-lio_unmap_ring_info(struct pci_dev *pci_dev,
- u64 info_ptr, u32 size)
-{
- dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+ return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
}
static inline u64
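
With the droq info list now carved out of a single coherent allocation, the bus address of entry i is plain pointer arithmetic on the saved base, replacing the old per-entry dma_map_single(); the gather-list rework in lio_main.c/lio_vf_main.c applies the same idea to g->sg and g->sg_dma_ptr. A sketch of the scheme (names are illustrative):

struct example_ring {
	void *virt_base;	/* from dma_alloc_coherent() */
	dma_addr_t dma_base;	/* matching bus address */
	u32 entry_size;
};

static void *example_entry_virt(struct example_ring *r, u32 i)
{
	return r->virt_base + i * r->entry_size;
}

static dma_addr_t example_entry_dma(struct example_ring *r, u32 i)
{
	/* the same offset applies to the bus address - no mapping calls */
	return r->dma_base + i * r->entry_size;
}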
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e739c7153562..2269ff562d95 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -269,6 +269,7 @@ struct nicvf {
#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
struct nicvf_cq_poll *napi[8];
+ void *iommu_domain;
u8 vf_id;
u8 sqs_id;
bool sqs_mode;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 6feaa24bcfd4..24017588f531 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -16,6 +16,7 @@
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
+#include <linux/iommu.h>
#include "nic_reg.h"
#include "nic.h"
@@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
/* Get actual TSO descriptors and free them */
tso_sqe =
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+ hdr->subdesc_cnt);
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
@@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
{
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
int err = 0;
int rq_idx;
@@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
if (err && !cqe_rx->rb_cnt)
return;
- skb = nicvf_get_rcv_skb(nic, cqe_rx);
+ skb = nicvf_get_rcv_skb(snic, cqe_rx);
if (!skb) {
netdev_dbg(nic->netdev, "Packet not received\n");
return;
@@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ /* Get iommu domain for iova to physical addr conversion */
+ nic->iommu_domain = iommu_get_domain_for_dev(dev);
+
pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
if (sdevid == 0xA134)
nic->t88 = true;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ac0390be3b12..f13289f0d238 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
+#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>
@@ -18,6 +19,16 @@
#include "q_struct.h"
#include "nicvf_queues.h"
+#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
+
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (nic->iommu_domain)
+ return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+ return dma_addr;
+}
+
static void nicvf_get_page(struct nicvf *nic)
{
if (!nic->rb_pageref || !nic->rb_page)
@@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
- int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
+ int order = NICVF_PAGE_ORDER;
 /* Check if request can be accommodated in previously allocated page */
if (nic->rb_page &&
@@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
}
nicvf_get_page(nic);
- nic->rb_page = NULL;
/* Allocate a new page */
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ order);
if (!nic->rb_page) {
- nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- order);
- if (!nic->rb_page) {
- this_cpu_inc(nic->pnicvf->drv_stats->
- rcv_buffer_alloc_failures);
- return -ENOMEM;
- }
- nic->rb_page_offset = 0;
+ this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
+ return -ENOMEM;
}
-
+ nic->rb_page_offset = 0;
ret:
- *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+ /* HW will ensure data coherency, CPU sync not required */
+ *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+ nic->rb_page_offset, buf_len,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC));
+ if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+ if (!nic->rb_page_offset)
+ __free_pages(nic->rb_page, order);
+ nic->rb_page = NULL;
+ return -ENOMEM;
+ }
nic->rb_page_offset += buf_len;
return 0;
@@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr->dma_size = buf_size;
rbdr->enable = true;
rbdr->thresh = RBDR_THRESH;
+ rbdr->head = 0;
+ rbdr->tail = 0;
nic->rb_page = NULL;
for (idx = 0; idx < ring_len; idx++) {
err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
&rbuf);
- if (err)
+ if (err) {
+ /* To free already allocated and mapped ones */
+ rbdr->tail = idx - 1;
return err;
+ }
desc = GET_RBDR_DESC(rbdr, idx);
- desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
}
nicvf_get_page(nic);
@@ -179,7 +200,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
int head, tail;
- u64 buf_addr;
+ u64 buf_addr, phys_addr;
struct rbdr_entry_t *desc;
if (!rbdr)
@@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
head = rbdr->head;
tail = rbdr->tail;
- /* Free SKBs */
+ /* Release page references */
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
- buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- put_page(virt_to_page(phys_to_virt(buf_addr)));
+ buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
head++;
head &= (rbdr->dmem.q_len - 1);
}
- /* Free SKB of tail desc */
+ /* Release buffer of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
- buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- put_page(virt_to_page(phys_to_virt(buf_addr)));
+ buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +279,7 @@ refill:
break;
desc = GET_RBDR_DESC(rbdr, tail);
- desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
refill_rb_cnt--;
new_rb++;
}
@@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
return 0;
}
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt)
+{
+ u8 idx;
+ struct sq_gather_subdesc *gather;
+
+ /* Unmap DMA mapped skb data buffers */
+ for (idx = 0; idx < subdesc_cnt; idx++) {
+ hdr_sqe++;
+ hdr_sqe &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
+ gather->size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
+ struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
if (!sq)
return;
@@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
smp_rmb();
while (sq->head != sq->tail) {
skb = (struct sk_buff *)sq->skbuff[sq->head];
- if (skb)
- dev_kfree_skb_any(skb);
+ if (!skb)
+ goto next;
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and unmap them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, sq->head,
+ hdr->subdesc_cnt);
+ }
+ dev_kfree_skb_any(skb);
+next:
sq->head++;
sq->head &= (sq->dmem.q_len - 1);
}
@@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_send_msg_to_pf(nic, &mbx);
if (!nic->sqs_mode && (qidx == 0)) {
- /* Enable checking L3/L4 length and TCP/UDP checksums */
+ /* Enable checking L3/L4 length and TCP/UDP checksums
+ * Also allow IPv6 pkts with zero UDP checksum.
+ */
nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
- (BIT(24) | BIT(23) | BIT(21)));
+ (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
}
@@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
return qentry;
}
+/* Rollback to previous tail pointer when descriptors not used */
+static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
+ int qentry, int desc_cnt)
+{
+ sq->tail = qentry;
+ atomic_add(desc_cnt, &sq->free_cnt);
+}
+
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
@@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
struct sk_buff *skb, u8 sq_num)
{
int i, size;
- int subdesc_cnt, tso_sqe = 0;
+ int subdesc_cnt, hdr_sqe = 0;
int qentry;
+ u64 dma_addr;
subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
- tso_sqe = qentry;
+ hdr_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
- nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
+ offset_in_page(skb->data), size,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
/* Check for scattered buffer */
if (!skb_is_nonlinear(skb))
@@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
- nicvf_sq_add_gather_subdesc(sq, qentry, size,
- virt_to_phys(
- skb_frag_address(frag)));
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev,
+ skb_frag_page(frag),
+ frag->page_offset, size,
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ /* Free entire chain of mapped buffers
+ * here 'i' = frags mapped + above mapped skb->data
+ */
+ nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
}
doorbell:
if (nic->t88 && skb_shinfo(skb)->gso_size) {
qentry = nicvf_get_nxt_sqentry(sq, qentry);
- nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
}
nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
+ u64 phys_addr;
rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
else
rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
- netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
- __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
-
for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
payload_len = rb_lens[frag_num(frag)];
+ phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
+ if (!phys_addr) {
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
if (!frag) {
/* First fragment */
+ dma_unmap_page_attrs(&nic->pdev->dev,
+ *rb_ptrs - cqe_rx->align_pad,
+ RCV_FRAG_LEN, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
skb = nicvf_rb_ptr_to_skb(nic,
- *rb_ptrs - cqe_rx->align_pad,
+ phys_addr - cqe_rx->align_pad,
payload_len);
if (!skb)
return NULL;
@@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- page = virt_to_page(phys_to_virt(*rb_ptrs));
- offset = phys_to_virt(*rb_ptrs) - page_address(page);
+ dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
+ RCV_FRAG_LEN, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ page = virt_to_page(phys_to_virt(phys_addr));
+ offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
offset, payload_len, RCV_FRAG_LEN);
}
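
nicvf_iova_to_phys() above encodes a common pattern: when an IOMMU domain is attached, descriptor addresses are IOVAs and must be translated before phys_to_virt()/virt_to_page(); with no domain, the DMA address already is the physical address. A self-contained sketch of that fallback, with a toy one-window translation standing in for iommu_iova_to_phys() (the window and addresses are invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    struct domain { uint64_t iova_base, phys_base, size; }; /* toy 1:1 window */

    /* Mirror of the driver's helper: translate when a domain is attached,
     * otherwise the DMA address is already physical. */
    static uint64_t iova_to_phys(const struct domain *dom, uint64_t dma_addr)
    {
        if (!dom)
            return dma_addr;
        if (dma_addr < dom->iova_base || dma_addr >= dom->iova_base + dom->size)
            return 0; /* no mapping: caller must drop the buffer */
        return dom->phys_base + (dma_addr - dom->iova_base);
    }

    int main(void)
    {
        struct domain dom = { 0xf0000000, 0x80000000, 0x100000 };

        printf("with IOMMU:    0x%llx\n",
               (unsigned long long)iova_to_phys(&dom, 0xf0000100));
        printf("without IOMMU: 0x%llx\n",
               (unsigned long long)iova_to_phys(NULL, 0x80000100));
        printf("unmapped iova: 0x%llx\n",
               (unsigned long long)iova_to_phys(&dom, 0x12345678));
        return 0;
    }

The zero return mirrors nicvf_get_rcv_skb() and nicvf_free_rbdr() above, which treat a failed translation as a buffer to skip rather than dereference.
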
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 5cb84da99a2d..10cb4b84625b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -87,7 +87,7 @@
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
-#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
+#define DMA_BUFFER_LEN 1536 /* In multiples of 128bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -301,6 +301,8 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 4c8e8cf730bb..64a1095e4d14 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
return 1;
}
+static int max_bgx_per_node;
+static void set_max_bgx_per_node(struct pci_dev *pdev)
+{
+ u16 sdevid;
+
+ if (max_bgx_per_node)
+ return;
+
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_81XX_BGX:
+ max_bgx_per_node = MAX_BGX_PER_CN81XX;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_BGX:
+ max_bgx_per_node = MAX_BGX_PER_CN83XX;
+ break;
+ case PCI_SUBSYS_DEVID_88XX_BGX:
+ default:
+ max_bgx_per_node = MAX_BGX_PER_CN88XX;
+ break;
+ }
+}
+
+static struct bgx *get_bgx(int node, int bgx_idx)
+{
+ int idx = (node * max_bgx_per_node) + bgx_idx;
+
+ return bgx_vnic[idx];
+}
+
/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
int i;
unsigned map = 0;
- for (i = 0; i < MAX_BGX_PER_NODE; i++) {
- if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
+ for (i = 0; i < max_bgx_per_node; i++) {
+ if (bgx_vnic[(node * max_bgx_per_node) + i])
map |= (1 << i);
}
@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac_count;
@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;
@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;
@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
dev_info(dev, "%s: 40G_KR4\n", (char *)str);
break;
case BGX_MODE_QSGMII:
- if ((lmacid == 0) &&
- (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
- return;
- if ((lmacid == 2) &&
- (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
- return;
dev_info(dev, "%s: QSGMII\n", (char *)str);
break;
case BGX_MODE_RGMII:
@@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
+ set_max_bgx_per_node(pdev);
+
pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
bgx->bgx_id = (pci_resource_start(pdev,
PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
- bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+ bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
} else {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index a60f189429bb..c5080f2cead5 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -22,7 +22,6 @@
#define MAX_BGX_PER_CN88XX 2
#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
#define MAX_BGX_PER_CN83XX 4
-#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
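
The thunder_bgx change replaces the fixed MAX_BGX_PER_NODE with a value picked once from the PCI subsystem device ID, since 81xx/83xx/88xx parts carry different BGX counts. The shape of that one-shot detection, sketched with made-up ID values (the real PCI_SUBSYS_DEVID_* constants are defined elsewhere in the driver):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative IDs only -- not the real PCI_SUBSYS_DEVID_* values. */
    #define SUBSYS_81XX 0xA200
    #define SUBSYS_83XX 0xA300
    #define SUBSYS_88XX 0xA100

    static int max_bgx_per_node; /* detected once, then cached */

    static void set_max_bgx_per_node(uint16_t sdevid)
    {
        if (max_bgx_per_node) /* already detected on an earlier probe */
            return;
        switch (sdevid) {
        case SUBSYS_81XX: max_bgx_per_node = 3; break; /* 2 BGX + 1 RGX */
        case SUBSYS_83XX: max_bgx_per_node = 4; break;
        case SUBSYS_88XX:
        default:          max_bgx_per_node = 2; break;
        }
    }

    int main(void)
    {
        set_max_bgx_per_node(SUBSYS_81XX);
        /* Flat array index for (node, bgx_idx), as get_bgx() computes it. */
        int node = 1, bgx_idx = 2;
        printf("max/node=%d, index(node=%d,bgx=%d)=%d\n",
               max_bgx_per_node, node, bgx_idx,
               node * max_bgx_per_node + bgx_idx);
        return 0;
    }
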
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 275c2e2349ad..c44036d5761a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
- int res;
-
dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
@@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
struct device_node *phy_handle;
- int res = 0;
+ int res = 1;
phy_handle = of_parse_phandle(np, "phy-handle", 0);
@@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
int res = emac_dt_phy_probe(dev);
- mutex_unlock(&emac_phy_map_lock);
- if (!res)
+ switch (res) {
+ case 1:
+ /* No phy-handle property configured.
+ * Continue with the existing phy probe
+ * and setup code.
+ */
+ break;
+
+ case 0:
+ mutex_unlock(&emac_phy_map_lock);
goto init_phy;
- dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
- res);
- return res;
+ default:
+ mutex_unlock(&emac_phy_map_lock);
+ dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
+ res);
+ return res;
+ }
}
if (dev->phy_address != 0xffffffff)
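
emac_dt_phy_probe() now defaults res to 1 so its caller can distinguish three outcomes: 1 means no "phy-handle" property (fall back to the legacy probe), 0 means the DT PHY attached, and a negative value is a hard error. A small sketch of consuming such a tri-state return (the helper below is a hypothetical stand-in, not the driver's function):

    #include <stdio.h>

    /* Hypothetical stand-in: 1 = feature absent, 0 = success, <0 = error. */
    static int dt_phy_probe(int have_phandle, int attach_ok)
    {
        if (!have_phandle)
            return 1;
        return attach_ok ? 0 : -19; /* -ENODEV */
    }

    static const char *handle(int res)
    {
        switch (res) {
        case 1:  return "no phy-handle, continue with legacy probe";
        case 0:  return "DT phy attached, skip legacy probe";
        default: return "hard failure, propagate the error";
        }
    }

    int main(void)
    {
        printf("%s\n", handle(dt_phy_probe(0, 0)));
        printf("%s\n", handle(dt_phy_probe(1, 1)));
        printf("%s\n", handle(dt_phy_probe(1, 0)));
        return 0;
    }
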
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9198e6bd5160..5f11b4dc95d2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
send_map_query(adapter);
for (i = 0; i < rxadd_subcrqs; i++) {
init_rx_pool(adapter, &adapter->rx_pool[i],
- IBMVNIC_BUFFS_PER_POOL, i,
+ adapter->req_rx_add_entries_per_subcrq, i,
be64_to_cpu(size_array[i]), 1);
if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
tx_pool->tx_buff =
- kcalloc(adapter->max_tx_entries_per_subcrq,
+ kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
if (!tx_pool->tx_buff)
goto tx_pool_alloc_failed;
if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- adapter->max_tx_entries_per_subcrq *
+ adapter->req_tx_entries_per_subcrq *
adapter->req_mtu))
goto tx_ltb_alloc_failed;
tx_pool->free_map =
- kcalloc(adapter->max_tx_entries_per_subcrq,
+ kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map)
goto tx_fm_alloc_failed;
- for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+ for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
tx_pool->free_map[j] = j;
tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
+ struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int ret = 0;
tx_pool = &adapter->tx_pool[queue_num];
+ tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) %
- adapter->max_tx_entries_per_subcrq;
+ adapter->req_tx_entries_per_subcrq;
tx_buff = &tx_pool->tx_buff[index];
tx_buff->skb = skb;
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (tx_pool->consumer_index == 0)
tx_pool->consumer_index =
- adapter->max_tx_entries_per_subcrq - 1;
+ adapter->req_tx_entries_per_subcrq - 1;
else
tx_pool->consumer_index--;
@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_BUSY;
goto out;
}
+
+ atomic_inc(&tx_scrq->used);
+
+ if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+ netdev_info(netdev, "Stopping queue %d\n", queue_num);
+ netif_stop_subqueue(netdev, queue_num);
+ }
+
tx_packets++;
tx_bytes += skb->len;
txq->trans_start = jiffies;
@@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
+ atomic_set(&scrq->used, 0);
scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);
@@ -1355,14 +1366,28 @@ restart_loop:
DMA_TO_DEVICE);
}
- if (txbuff->last_frag)
+ if (txbuff->last_frag) {
+ atomic_dec(&scrq->used);
+
+ if (atomic_read(&scrq->used) <=
+ (adapter->req_tx_entries_per_subcrq / 2) &&
+ netif_subqueue_stopped(adapter->netdev,
+ txbuff->skb)) {
+ netif_wake_subqueue(adapter->netdev,
+ scrq->pool_index);
+ netdev_dbg(adapter->netdev,
+ "Started queue %d\n",
+ scrq->pool_index);
+ }
+
dev_kfree_skb_any(txbuff->skb);
+ }
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index;
adapter->tx_pool[pool].producer_index =
(adapter->tx_pool[pool].producer_index + 1) %
- adapter->max_tx_entries_per_subcrq;
+ adapter->req_tx_entries_per_subcrq;
}
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
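
The ibmvnic changes add classic TX flow control: an atomic per-queue count of in-flight descriptors, the queue stopped once it reaches req_tx_entries_per_subcrq and woken again only when completions drain it to half. The asymmetric thresholds (stop high, wake at half) give hysteresis so the queue does not ping-pong between states. A runnable sketch of the counting logic, with C11 stdatomic standing in for the kernel's atomic_t and prints standing in for netif_stop/wake_subqueue:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define Q_LIMIT 8

    static atomic_int used;
    static bool stopped;

    static void xmit_one(void)
    {
        atomic_fetch_add(&used, 1);
        if (atomic_load(&used) >= Q_LIMIT && !stopped) {
            stopped = true;
            printf("stop queue at used=%d\n", atomic_load(&used));
        }
    }

    static void complete_one(void)
    {
        atomic_fetch_sub(&used, 1);
        /* Wake only once half the ring has drained -- hysteresis. */
        if (atomic_load(&used) <= Q_LIMIT / 2 && stopped) {
            stopped = false;
            printf("wake queue at used=%d\n", atomic_load(&used));
        }
    }

    int main(void)
    {
        for (int i = 0; i < Q_LIMIT; i++)
            xmit_one();
        for (int i = 0; i < Q_LIMIT; i++)
            complete_one();
        return 0;
    }
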
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 422824f1f42a..1993b42666f7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
+ atomic_t used;
};
struct ibmvnic_long_term_buff {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ddb4ca4ff930..117170014e88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ depends on IPV6=y || IPV6=n || MLX5_CORE=m
imply PTP_1588_CLOCK
default n
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0523ed47f597..8fa23f6a1f67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ if (mode & DCB_CAP_DCBX_LLD_MANAGED)
+ return 1;
+
if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
return 0;
@@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
}
- if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+ if (!(mode & DCB_CAP_DCBX_HOST))
return 1;
- if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
- !(mode & DCB_CAP_DCBX_VER_CEE) ||
- !(mode & DCB_CAP_DCBX_VER_IEEE) ||
- !(mode & DCB_CAP_DCBX_HOST))
+ if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 31e3cb7ee5fe..5621dcfda4f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct iphdr *iph;
/* We are only going to peek, no need to clone the SKB */
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
-
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
@@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
- lbtp->pt.type = htons(ETH_P_ALL);
+ lbtp->pt.type = htons(ETH_P_IP);
lbtp->pt.func = mlx5e_test_loopback_validate;
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 44406a5ec15d..79481f4cf264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -48,9 +48,14 @@
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5E_TC_FLOW_ESWITCH = BIT(0),
+};
+
struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
+ u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
struct mlx5_esw_flow_attr *attr;
@@ -177,7 +182,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5_fc_destroy(priv->mdev, counter);
}
- if (esw && esw->mode == SRIOV_OFFLOADS) {
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
@@ -598,6 +603,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
static int parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
@@ -609,7 +615,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
err = __parse_cls_flower(priv, spec, f, &min_inline);
- if (!err && esw->mode == SRIOV_OFFLOADS &&
+ if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
rep->vport != FDB_UPLINK_VPORT) {
if (min_inline > esw->offloads.inline_mode) {
netdev_warn(priv->netdev,
@@ -1132,23 +1138,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
- int err = 0;
- bool fdb_flow = false;
+ int err, attr_size = 0;
u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u8 flow_flags = 0;
- if (esw && esw->mode == SRIOV_OFFLOADS)
- fdb_flow = true;
-
- if (fdb_flow)
- flow = kzalloc(sizeof(*flow) +
- sizeof(struct mlx5_esw_flow_attr),
- GFP_KERNEL);
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (esw && esw->mode == SRIOV_OFFLOADS) {
+ flow_flags = MLX5E_TC_FLOW_ESWITCH;
+ attr_size = sizeof(struct mlx5_esw_flow_attr);
+ }
+ flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
err = -ENOMEM;
@@ -1156,12 +1158,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
}
flow->cookie = f->cookie;
+ flow->flags = flow_flags;
- err = parse_cls_flower(priv, spec, f);
+ err = parse_cls_flower(priv, flow, spec, f);
if (err < 0)
goto err_free;
- if (fdb_flow) {
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
err = parse_tc_fdb_actions(priv, f->exts, flow);
if (err < 0)
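
The en_tc.c rework folds the two allocation paths into one by sizing the flow object with an optional trailing mlx5_esw_flow_attr and flagging eswitch flows, so later code tests flow->flags instead of re-deriving the eswitch mode. The trailing-area idiom, sketched in plain C (types and field names invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    struct esw_attr { int vlan; int vport; }; /* invented */
    struct flow {
        unsigned long cookie;
        unsigned char flags;
    #define FLOW_ESWITCH 0x1
        struct esw_attr *attr; /* points into the trailer, if any */
    };

    static struct flow *flow_alloc(int eswitch)
    {
        size_t attr_size = eswitch ? sizeof(struct esw_attr) : 0;
        struct flow *f = calloc(1, sizeof(*f) + attr_size);

        if (!f)
            return NULL;
        if (eswitch) {
            f->flags = FLOW_ESWITCH;
            f->attr = (struct esw_attr *)(f + 1); /* trailing area */
        }
        return f;
    }

    int main(void)
    {
        struct flow *f = flow_alloc(1);
        if (!f)
            return 1;
        printf("eswitch=%d attr=%p trailer=%p\n",
               !!(f->flags & FLOW_ESWITCH), (void *)f->attr, (void *)(f + 1));
        free(f);
        return 0;
    }
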
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2478516a61e2..ded27bb9a3b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = ft->node.children.prev;
+ struct list_head *prev = &ft->node.children;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c4242a4e8130..e2bd600d19de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
if (err)
goto clean_load;
+ pci_save_state(pdev);
return 0;
clean_load:
@@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- /* In case of kernel call save the pci state and drain the health wq */
+ /* In case of kernel call drain the health wq */
if (state) {
- pci_save_state(pdev);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
}
@@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0899e2d310e2..d9616daf8a70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
#define MLXSW_REG_SPVM_ID 0x200F
#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
@@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
#define MLXSW_REG_SPVMLR_ID 0x2020
#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
MLXSW_REG_SPVMLR_REC_LEN * \
MLXSW_REG_SPVMLR_REC_MAX_COUNT)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 22ab42925377..ae6cccc666e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -303,11 +303,11 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
MLXSW_SP_ACL_PROFILE_FLOWER);
- if (WARN_ON(IS_ERR(ruleset)))
+ if (IS_ERR(ruleset))
return;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
- if (!WARN_ON(!rule)) {
+ if (rule) {
mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d42d03df751a..7e3a6fed3da6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ u32 align = elems_per_page * DQ_RANGE_ALIGN;
- p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+ p_conn->cid_count = roundup(p_conn->cid_count, align);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e2a081ceaf52..e518f914eab1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
* size/capacity fields are of a u32 type.
*/
if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
- chain_size > 0x10000) ||
- (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
- chain_size > 0x100000000ULL)) {
+ chain_size > ((u32)U16_MAX + 1)) ||
+ (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
DP_NOTICE(cdev,
"The actual chain size (0x%llx) is larger than the maximal possible value\n",
chain_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 3a44d6b395fa..098766f7fe88 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ooo_enable = p_params->ooo_enable;
+ p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+ p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
+void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct tcp_upload_params),
+ p_conn->tcp_upload_params_virt_addr,
+ p_conn->tcp_upload_params_phys_addr);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct scsi_terminate_extra_params),
+ p_conn->queue_cnts_virt_addr,
+ p_conn->queue_cnts_phys_addr);
+ kfree(p_conn);
+}
+
struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info)
{
+ struct qed_iscsi_conn *p_conn = NULL;
+
+ while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+ struct qed_iscsi_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_iscsi_free_connection(p_hwfn, p_conn);
+ }
+ }
+
kfree(p_iscsi_info);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 9a0b9af10a57..0d3cef409c96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
/* If need to reuse or there's no replacement buffer, repost this */
if (rc)
goto out_post;
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
skb = build_skb(buffer->data, 0);
if (!skb) {
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
- unsigned long lock_flags,
+ unsigned long *p_lock_flags,
bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
- spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
- spin_lock_irqsave(&p_rx->lock, lock_flags);
+ spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
return 0;
}
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
break;
case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
- cqe, flags, b_last_cqe);
+ cqe, &flags,
+ b_last_cqe);
break;
default:
rc = -EIO;
@@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_conn ll2_info;
+ struct qed_ll2_conn ll2_info = { 0 };
int rc;
ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
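
The qed_ll2 completion fix is subtle: the helper drops and retakes an irqsave spinlock around a callback, so it must receive the caller's flags variable by pointer; with a by-value copy, the flags the caller later hands to its own spin_unlock_irqrestore() would be stale. The essence of the pattern, modeled with a plain saved-state integer instead of real IRQ flags (the lock calls are stubs):

    #include <stdio.h>

    static int irq_state = 1; /* pretend IRQs start enabled */

    static void lock_irqsave(unsigned long *flags)
    {
        *flags = irq_state; /* save current state... */
        irq_state = 0;      /* ...then disable */
    }

    static void unlock_irqrestore(unsigned long flags)
    {
        irq_state = (int)flags; /* restore whatever was saved */
    }

    /* Correct: the helper updates the caller's saved flags in place. */
    static void helper(unsigned long *p_flags)
    {
        unlock_irqrestore(*p_flags); /* drop lock around a callback */
        /* ... invoke the completion callback here ... */
        lock_irqsave(p_flags);       /* retake; refresh caller's copy */
    }

    int main(void)
    {
        unsigned long flags;

        lock_irqsave(&flags);
        helper(&flags); /* by pointer, as the fix does */
        unlock_irqrestore(flags);
        printf("irq_state=%d (1 == correctly restored)\n", irq_state);
        return 0;
    }
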
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 7d731c6cb892..378afce58b3f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->ooo_history.p_cqes)
goto no_history_mem;
+ p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
return p_ooo_info;
no_history_mem:
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 6d31f92ef2b6..84ac50f92c9c 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp {
struct ib_mac_iocb_rsp {
u8 opcode; /* 0x20 */
u8 flags1;
-#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */
-#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */
+#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 65077c77082a..91e9bd7159ab 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
* Ethtool support
*/
static int
-smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
-
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_gset(&lp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
- cmd->supported = SUPPORTED_10baseT_Half |
+ u32 supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
+
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = 0;
+ cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
+ DUPLEX_FULL : DUPLEX_HALF;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->port = 0;
- cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
ret = 0;
}
@@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}
static int
-smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_sset(&lp->mii, cmd);
+ ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
- if (cmd->autoneg != AUTONEG_DISABLE ||
- cmd->speed != SPEED_10 ||
- (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
- (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ if (cmd->base.autoneg != AUTONEG_DISABLE ||
+ cmd->base.speed != SPEED_10 ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL) ||
+ (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;
-// lp->port = cmd->port;
- lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+// lp->port = cmd->base.port;
+ lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
// if (netif_running(dev))
// smc_set_port(dev);
@@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
static const struct ethtool_ops smc_ethtool_ops = {
- .get_settings = smc_ethtool_getsettings,
- .set_settings = smc_ethtool_setsettings,
.get_drvinfo = smc_ethtool_getdrvinfo,
.get_msglevel = smc_ethtool_getmsglevel,
@@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
.get_eeprom_len = smc_ethtool_geteeprom_len,
.get_eeprom = smc_ethtool_geteeprom,
.set_eeprom = smc_ethtool_seteeprom,
+ .get_link_ksettings = smc_ethtool_get_link_ksettings,
+ .set_link_ksettings = smc_ethtool_set_link_ksettings,
};
static const struct net_device_ops smc_netdev_ops = {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d3e73ac158ae..f9f3dba7a588 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -700,6 +700,8 @@ struct net_device_context {
u32 tx_checksum_mask;
+ u32 tx_send_table[VRSS_SEND_TAB_SIZE];
+
/* Ethtool settings */
u8 duplex;
u32 speed;
@@ -757,7 +759,6 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
- u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d35ebd993b38..4c1d8cca247b 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev,
static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
- struct netvsc_device *nvscdev;
struct net_device *ndev = hv_get_drvdata(hdev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
int i;
u32 count, *tab;
- nvscdev = get_outbound_net_device(hdev);
- if (!nvscdev)
- return;
-
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev,
nvmsg->msg.v5_msg.send_table.offset);
for (i = 0; i < count; i++)
- nvscdev->send_table[i] = tab[i];
+ net_device_ctx->tx_send_table[i] = tab[i];
}
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bc05c895d958..5ede87f30463 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
+ unsigned int num_tx_queues = ndev->real_num_tx_queues;
struct sock *sk = skb->sk;
int q_idx = sk_tx_queue_get(sk);
- if (q_idx < 0 || skb->ooo_okay ||
- q_idx >= ndev->real_num_tx_queues) {
+ if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
int new_idx;
- new_idx = nvsc_dev->send_table[hash]
- % nvsc_dev->num_chn;
+ new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
if (q_idx != new_idx && sk &&
sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
q_idx = new_idx;
}
- if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
- q_idx = 0;
-
return q_idx;
}
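
netvsc_select_queue() now reads the indirection table from the net_device_context, which outlives the netvsc_device across channel teardown, and clamps the result with % num_tx_queues. The selection itself is a plain hash-indexed indirection, sketched below (the hash values and table contents are invented):

    #include <stdio.h>

    #define TAB_SIZE 16 /* stand-in for VRSS_SEND_TAB_SIZE */

    static unsigned int send_table[TAB_SIZE];

    static unsigned int select_queue(unsigned int flow_hash,
                                     unsigned int num_tx_queues)
    {
        /* Table entries may name more channels than currently exist,
         * so the modulo keeps the result in range. */
        return send_table[flow_hash % TAB_SIZE] % num_tx_queues;
    }

    int main(void)
    {
        for (unsigned int i = 0; i < TAB_SIZE; i++)
            send_table[i] = i * 3; /* invented spread */

        for (unsigned int h = 0; h < 4; h++)
            printf("hash %u -> queue %u\n", h, select_queue(h, 4));
        return 0;
    }
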
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f9d0fa315a47..272b051a0199 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}
-static void marvell_remove(struct phy_device *phydev)
-{
-#ifdef CONFIG_HWMON
-
- struct marvell_priv *priv = phydev->priv;
-
- if (priv && priv->hwmon_dev)
- hwmon_device_unregister(priv->hwmon_dev);
-#endif
-}
-
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
- .remove = &marvell_remove,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
@@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
- .remove = &marvell_remove,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
- .remove = &marvell_remove,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
- .remove = &marvell_remove,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index daec6555f3b1..5198ccfa347f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
- .soft_reset = genphy_soft_reset,
+ .soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 93ffedfa2994..1e2d4f1179da 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;
- ks->regs_attr.size = ks->chip->regs_size;
memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+ ks->regs_attr.size = ks->chip->regs_size;
err = ks8995_reset(ks);
if (err)
return err;
+ sysfs_attr_init(&ks->regs_attr.attr);
err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
if (err) {
dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4a24b5d15f5a..1b52520715ae 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->max_mtu = ETH_MAX_MTU;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dc1b1dd9157c..34cc3c590aa5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
+ struct tun_struct *tun = netdev_priv(dev);
+ int i;
+
netif_tx_start_all_queues(dev);
+
+ for (i = 0; i < tun->numqueues; i++) {
+ struct tun_file *tfile;
+
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ tfile->socket.sk->sk_write_space(tfile->socket.sk);
+ }
+
return 0;
}
@@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
- if (sock_writeable(sk) ||
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
- sock_writeable(sk)))
+ if (tun->dev->flags & IFF_UP &&
+ (sock_writeable(sk) ||
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ sock_writeable(sk))))
mask |= POLLOUT | POLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -2570,7 +2582,6 @@ static int __init tun_init(void)
int ret = 0;
pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- pr_info("%s\n", DRV_COPYRIGHT);
ret = rtnl_link_register(&tun_link_ops);
if (ret) {
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 22379da63400..fea687f35b5a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&dstats->syncp);
dstats->tx_pkts++;
- dstats->tx_bytes += skb->len;
+ dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
this_cpu_inc(dev->dstats->tx_drps);
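
The vrf fix caches skb->len before handing the skb to is_ip_tx_frame(): once transmitted, the skb may already be freed, so reading skb->len afterwards is a use-after-free. The same discipline in a tiny sketch (the buffer type and send function are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { size_t len; char *data; };

    /* Consumes the buffer: after this returns, 'b' must not be touched. */
    static int send_and_free(struct buf *b)
    {
        free(b->data);
        free(b);
        return 0; /* success */
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        if (!b)
            return 1;
        b->data = strdup("payload");
        b->len = strlen(b->data);

        size_t len = b->len; /* cache before the consuming call */
        if (send_and_free(b) == 0)
            printf("tx_bytes += %zu\n", len); /* never b->len here */
        return 0;
    }
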
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e375560cc74e..bdb6ae16d4a8 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2976,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}
+static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+ struct vxlan_config *conf)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err;
+
+ err = vxlan_dev_configure(net, dev, conf, false);
+ if (err)
+ return err;
+
+ dev->ethtool_ops = &vxlan_ethtool_ops;
+
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+ return err;
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
+ return 0;
+}
+
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink)
@@ -3172,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_config conf;
int err;
@@ -3181,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;
- err = vxlan_dev_configure(src_net, dev, &conf, false);
- if (err)
- return err;
-
- dev->ethtool_ops = &vxlan_ethtool_ops;
-
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE | NUD_PERMANENT,
- NLM_F_EXCL | NLM_F_CREATE,
- vxlan->cfg.dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
- NTF_SELF);
- if (err)
- return err;
- }
-
- err = register_netdevice(dev);
- if (err) {
- vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
- return err;
- }
-
- list_add(&vxlan->next, &vn->vxlan_list);
-
- return 0;
+ return __vxlan_dev_create(src_net, dev, &conf);
}
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3440,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;
- err = vxlan_dev_configure(net, dev, conf, false);
+ err = __vxlan_dev_create(net, dev, conf);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index a5045b5279d7..6742ae605660 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
- iowrite16be(bd_status, &bd->status);
iowrite16be(skb->len, &bd->length);
+ iowrite16be(bd_status, &bd->status);
/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
struct sk_buff *skb;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
- u32 bd_status;
+ u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
int i;
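
Swapping the two iowrite16be() calls in ucc_hdlc_tx() matters because the status word's ready bit is what hands the descriptor to the controller: the length must be visible before status flips, or the device can consume a ready descriptor with a stale length. A portable model of this publish-after-init ordering using a C11 release store (the memory-mapped I/O specifics are elided):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bd {
        uint16_t length;
        _Atomic uint16_t status; /* bit 15 == "ready for device" */
    };

    #define BD_READY 0x8000u

    static void bd_publish(struct bd *d, uint16_t len)
    {
        d->length = len;                             /* 1: payload fields */
        atomic_store_explicit(&d->status, BD_READY,  /* 2: then the flag, */
                              memory_order_release); /*    ordered after   */
    }

    int main(void)
    {
        struct bd d = { 0 };
        bd_publish(&d, 64);
        printf("status=0x%04x length=%u\n",
               atomic_load(&d.status), d.length);
        return 0;
    }
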
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index e7f5910a6519..f8eb66ef2944 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);
+ if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+ return -ENODEV;
+
if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 829b26cd4549..8397f6c92451 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
- unsigned int num_queues = vif->num_queues;
+ unsigned int num_queues;
u16 index;
struct xenvif_rx_cb *cb;
BUG_ON(skb->dev != dev);
- /* Drop the packet if queues are not set up */
+ /* Drop the packet if queues are not set up.
+ * This handler should be called inside an RCU read section
+ * so we don't need to enter it here explicitly.
+ */
+ num_queues = READ_ONCE(vif->num_queues);
if (num_queues < 1)
goto drop;
@@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
+ unsigned int num_queues;
u64 rx_bytes = 0;
u64 rx_packets = 0;
u64 tx_bytes = 0;
u64 tx_packets = 0;
unsigned int index;
- spin_lock(&vif->lock);
- if (vif->queues == NULL)
- goto out;
+ rcu_read_lock();
+ num_queues = READ_ONCE(vif->num_queues);
/* Aggregate tx and rx stats from each queue */
- for (index = 0; index < vif->num_queues; ++index) {
+ for (index = 0; index < num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
tx_packets += queue->stats.tx_packets;
}
-out:
- spin_unlock(&vif->lock);
+ rcu_read_unlock();
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
@@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
struct xenvif *vif = netdev_priv(dev);
- unsigned int num_queues = vif->num_queues;
+ unsigned int num_queues;
int i;
unsigned int queue_index;
+ rcu_read_lock();
+ num_queues = READ_ONCE(vif->num_queues);
+
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
}
data[i] = accum;
}
+
+ rcu_read_unlock();
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f9bcf4a665bc..602d408fa25e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
/* Disable the vif from queue 0's kthread */
- if (vif->queues)
+ if (vif->num_queues)
xenvif_kick_thread(&vif->queues[0]);
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d2d7cd9145b1..a56d3eab35dd 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
struct xenvif *vif = be->vif;
if (vif) {
+ unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
- struct xenvif_queue *queues;
xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(vif);
- for (queue_index = 0;
- queue_index < vif->num_queues;
- ++queue_index)
- xenvif_deinit_queue(&vif->queues[queue_index]);
- spin_lock(&vif->lock);
- queues = vif->queues;
+ /* At this point some of the handlers may still be active
+ * so we need to have additional synchronization here.
+ */
vif->num_queues = 0;
- vif->queues = NULL;
- spin_unlock(&vif->lock);
+ synchronize_net();
- vfree(queues);
+ for (queue_index = 0; queue_index < num_queues; ++queue_index)
+ xenvif_deinit_queue(&vif->queues[queue_index]);
+
+ vfree(vif->queues);
+ vif->queues = NULL;
xenvif_disconnect_ctrl(vif);
}
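For context, the locking change above replaces vif->lock with the standard RCU publish/retire discipline: readers snapshot the queue count once inside an RCU read-side section, and the teardown path zeroes the count and waits for in-flight readers before freeing the array. A minimal kernel-style sketch of that pattern (illustrative names, not xen-netback's actual code):

#include <linux/netdevice.h>	/* synchronize_net() */
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct entry { u64 counter; };

static struct entry *entries;		/* array published to readers */
static unsigned int nr_entries;		/* count read with READ_ONCE() */

/* Reader side: runs under rcu_read_lock(). */
static u64 reader_sum(void)
{
	unsigned int i, n;
	u64 sum = 0;

	rcu_read_lock();
	n = READ_ONCE(nr_entries);	/* snapshot the count exactly once */
	for (i = 0; i < n; i++)
		sum += entries[i].counter;
	rcu_read_unlock();
	return sum;
}

/* Updater side: retire the array only after readers drain. */
static void retire_entries(void)
{
	WRITE_ONCE(nr_entries, 0);	/* new readers see an empty array */
	synchronize_net();		/* wait for in-flight RCU readers */
	vfree(entries);			/* now safe to free */
	entries = NULL;
}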
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 993b650ef275..44f774c12fb2 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
struct device *dev = pci->dev;
struct resource *res;
- /* If using the PHY framework, doesn't need to get other resource */
- if (ep->using_phy)
- return 0;
-
ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
if (!ep->mem_res)
return -ENOMEM;
@@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
if (IS_ERR(ep->mem_res->elbi_base))
return PTR_ERR(ep->mem_res->elbi_base);
+ /* If using the PHY framework, there is no need to get other resources */
+ if (ep->using_phy)
+ return 0;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ep->mem_res->phy_base))
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 973472c23d89..1dfa10cc566b 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- struct pci_dev *child, *parent = link->pdev;
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
struct aspm_register_info upreg, dwreg;
@@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Get upstream/downstream components' register state */
pcie_get_aspm_reg(parent, &upreg);
- child = pci_function_0(linkbus);
pcie_get_aspm_reg(child, &dwreg);
- link->downstream = child;
/*
* If ASPM not supported, don't mess with the clocks and link,
@@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
INIT_LIST_HEAD(&link->children);
INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
+ link->downstream = pci_function_0(pdev->subordinate);
/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f754453fe754..673683660b5c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
/*
* For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f8e9e1c2b2f6..c978be5eb9eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -422,6 +422,20 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
return 0;
}
+static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct msm_pingroup *g;
+ u32 val;
+
+ g = &pctrl->soc->groups[offset];
+
+ val = readl(pctrl->regs + g->ctl_reg);
+
+ /* 0 = output, 1 = input */
+ return val & BIT(g->oe_bit) ? 0 : 1;
+}
+
static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
{
const struct msm_pingroup *g;
@@ -510,6 +524,7 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static struct gpio_chip msm_gpio_template = {
.direction_input = msm_gpio_direction_input,
.direction_output = msm_gpio_direction_output,
+ .get_direction = msm_gpio_get_direction,
.get = msm_gpio_get,
.set = msm_gpio_set,
.request = gpiochip_generic_request,
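The new get_direction hook follows the gpiolib convention that the callback returns 0 for an output and 1 for an input; here the direction is derived from the output-enable bit in the pad's control register. A standalone sketch of that decode (the bit position is illustrative, not taken from any particular SoC):

#include <linux/bitops.h>
#include <linux/types.h>

#define SKETCH_OE_BIT	9	/* hypothetical output-enable bit */

/* Returns 0 for output, 1 for input -- the gpiolib convention. */
static int sketch_decode_direction(u32 ctl_val)
{
	return ctl_val & BIT(SKETCH_OE_BIT) ? 0 : 1;
}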
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 77a0236ee781..83f8864fa76a 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -390,22 +390,22 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140,
140, UNIPHIER_PIN_DRV_1BIT,
140, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(141, "TCON0", 141,
+ UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141,
141, UNIPHIER_PIN_DRV_1BIT,
141, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(142, "TCON1", 142,
+ UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142,
142, UNIPHIER_PIN_DRV_1BIT,
142, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(143, "TCON2", 143,
+ UNIPHIER_PINCTRL_PIN(143, "XIRQ9", 143,
143, UNIPHIER_PIN_DRV_1BIT,
143, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(144, "TCON3", 144,
+ UNIPHIER_PINCTRL_PIN(144, "XIRQ10", 144,
144, UNIPHIER_PIN_DRV_1BIT,
144, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(145, "TCON4", 145,
+ UNIPHIER_PINCTRL_PIN(145, "XIRQ11", 145,
145, UNIPHIER_PIN_DRV_1BIT,
145, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(146, "TCON5", 146,
+ UNIPHIER_PINCTRL_PIN(146, "XIRQ13", 146,
146, UNIPHIER_PIN_DRV_1BIT,
146, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(147, "PWMA", 147,
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 5be4783e40d4..dea98ffb6f60 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -103,15 +103,6 @@ static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
};
-static struct quirk_entry quirk_no_rfkill = {
- .no_rfkill = true,
-};
-
-static struct quirk_entry quirk_no_rfkill_wapf4 = {
- .wapf = 4,
- .no_rfkill = true,
-};
-
static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
};
@@ -194,7 +185,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
},
- .driver_data = &quirk_no_rfkill_wapf4,
+ .driver_data = &quirk_asus_wapf4,
},
{
.callback = dmi_matched,
@@ -203,7 +194,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
},
- .driver_data = &quirk_no_rfkill_wapf4,
+ .driver_data = &quirk_asus_wapf4,
},
{
.callback = dmi_matched,
@@ -369,42 +360,6 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. X555UB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. N552VW",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. U303LB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. Z550MA",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. UX303UB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 43cb680adbb4..8fe5890bf539 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -159,6 +159,8 @@ MODULE_LICENSE("GPL");
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+
struct bios_args {
u32 arg0;
u32 arg1;
@@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
return 0;
}
+static bool ashs_present(void)
+{
+ int i = 0;
+ while (ashs_ids[i]) {
+ if (acpi_dev_found(ashs_ids[i++]))
+ return true;
+ }
+ return false;
+}
+
/*
* WMI Driver
*/
@@ -2095,7 +2107,11 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_leds;
- if (!asus->driver->quirks->no_rfkill) {
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+ if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ asus->driver->wlan_ctrl_by_user = 1;
+
+ if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
err = asus_wmi_rfkill_init(asus);
if (err)
goto fail_rfkill;
@@ -2134,10 +2150,6 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_debugfs;
- asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
- if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
- asus->driver->wlan_ctrl_by_user = 1;
-
return 0;
fail_debugfs:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index fdff626c3b51..c9589d9342bb 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -39,7 +39,6 @@ struct key_entry;
struct asus_wmi;
struct quirk_entry {
- bool no_rfkill;
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 2b218b1d13e5..e12cc3504d48 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -78,18 +78,18 @@
#define FUJITSU_LCD_N_LEVELS 8
-#define ACPI_FUJITSU_CLASS "fujitsu"
-#define ACPI_FUJITSU_HID "FUJ02B1"
-#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
-#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1"
-#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3"
-#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
-#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"
+#define ACPI_FUJITSU_CLASS "fujitsu"
+#define ACPI_FUJITSU_BL_HID "FUJ02B1"
+#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
+#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"
+#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"
+#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
+#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"
#define ACPI_FUJITSU_NOTIFY_CODE1 0x80
/* FUNC interface - command values */
-#define FUNC_RFKILL 0x1000
+#define FUNC_FLAGS 0x1000
#define FUNC_LEDS 0x1001
#define FUNC_BUTTONS 0x1002
#define FUNC_BACKLIGHT 0x1004
@@ -97,6 +97,11 @@
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000
+/* FUNC interface - status flags */
+#define FLAG_RFKILL 0x020
+#define FLAG_LID 0x100
+#define FLAG_DOCK 0x200
+
#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
@@ -136,7 +141,7 @@
#endif
/* Device controlling the backlight and associated keys */
-struct fujitsu_t {
+struct fujitsu_bl {
acpi_handle acpi_handle;
struct acpi_device *dev;
struct input_dev *input;
@@ -150,12 +155,12 @@ struct fujitsu_t {
unsigned int brightness_level;
};
-static struct fujitsu_t *fujitsu;
+static struct fujitsu_bl *fujitsu_bl;
static int use_alt_lcd_levels = -1;
static int disable_brightness_adjust = -1;
-/* Device used to access other hotkeys on the laptop */
-struct fujitsu_hotkey_t {
+/* Device used to access hotkeys and other features on the laptop */
+struct fujitsu_laptop {
acpi_handle acpi_handle;
struct acpi_device *dev;
struct input_dev *input;
@@ -163,17 +168,15 @@ struct fujitsu_hotkey_t {
struct platform_device *pf_device;
struct kfifo fifo;
spinlock_t fifo_lock;
- int rfkill_supported;
- int rfkill_state;
+ int flags_supported;
+ int flags_state;
int logolamp_registered;
int kblamps_registered;
int radio_led_registered;
int eco_led_registered;
};
-static struct fujitsu_hotkey_t *fujitsu_hotkey;
-
-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
+static struct fujitsu_laptop *fujitsu_laptop;
#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
@@ -222,8 +225,6 @@ static struct led_classdev eco_led = {
static u32 dbg_level = 0x03;
#endif
-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);
-
/* Fujitsu ACPI interface function */
static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
@@ -239,7 +240,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
unsigned long long value;
acpi_handle handle = NULL;
- status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
+ status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR,
"FUNC interface is not present\n");
@@ -300,9 +301,9 @@ static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL)
- return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);
else
- return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
}
static int eco_led_set(struct led_classdev *cdev,
@@ -346,7 +347,7 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;
- if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
brightness = LED_FULL;
return brightness;
@@ -373,10 +374,10 @@ static int set_lcd_level(int level)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
level);
- if (level < 0 || level >= fujitsu->max_brightness)
+ if (level < 0 || level >= fujitsu_bl->max_brightness)
return -EINVAL;
- status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
+ status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
return -ENODEV;
@@ -398,10 +399,10 @@ static int set_lcd_level_alt(int level)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
level);
- if (level < 0 || level >= fujitsu->max_brightness)
+ if (level < 0 || level >= fujitsu_bl->max_brightness)
return -EINVAL;
- status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
+ status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
return -ENODEV;
@@ -421,19 +422,19 @@ static int get_lcd_level(void)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
- status =
- acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
+ status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,
+ &state);
if (ACPI_FAILURE(status))
return 0;
- fujitsu->brightness_level = state & 0x0fffffff;
+ fujitsu_bl->brightness_level = state & 0x0fffffff;
if (state & 0x80000000)
- fujitsu->brightness_changed = 1;
+ fujitsu_bl->brightness_changed = 1;
else
- fujitsu->brightness_changed = 0;
+ fujitsu_bl->brightness_changed = 0;
- return fujitsu->brightness_level;
+ return fujitsu_bl->brightness_level;
}
static int get_max_brightness(void)
@@ -443,14 +444,14 @@ static int get_max_brightness(void)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
- status =
- acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
+ status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,
+ &state);
if (ACPI_FAILURE(status))
return -1;
- fujitsu->max_brightness = state;
+ fujitsu_bl->max_brightness = state;
- return fujitsu->max_brightness;
+ return fujitsu_bl->max_brightness;
}
/* Backlight device stuff */
@@ -483,7 +484,7 @@ static int bl_update_status(struct backlight_device *b)
return ret;
}
-static const struct backlight_ops fujitsubl_ops = {
+static const struct backlight_ops fujitsu_bl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
@@ -511,7 +512,7 @@ show_brightness_changed(struct device *dev,
int ret;
- ret = fujitsu->brightness_changed;
+ ret = fujitsu_bl->brightness_changed;
if (ret < 0)
return ret;
@@ -539,7 +540,7 @@ static ssize_t store_lcd_level(struct device *dev,
int level, ret;
if (sscanf(buf, "%i", &level) != 1
- || (level < 0 || level >= fujitsu->max_brightness))
+ || (level < 0 || level >= fujitsu_bl->max_brightness))
return -EINVAL;
if (use_alt_lcd_levels)
@@ -567,9 +568,9 @@ static ssize_t
show_lid_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!(fujitsu_hotkey->rfkill_supported & 0x100))
+ if (!(fujitsu_laptop->flags_supported & FLAG_LID))
return sprintf(buf, "unknown\n");
- if (fujitsu_hotkey->rfkill_state & 0x100)
+ if (fujitsu_laptop->flags_state & FLAG_LID)
return sprintf(buf, "open\n");
else
return sprintf(buf, "closed\n");
@@ -579,9 +580,9 @@ static ssize_t
show_dock_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!(fujitsu_hotkey->rfkill_supported & 0x200))
+ if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
return sprintf(buf, "unknown\n");
- if (fujitsu_hotkey->rfkill_state & 0x200)
+ if (fujitsu_laptop->flags_state & FLAG_DOCK)
return sprintf(buf, "docked\n");
else
return sprintf(buf, "undocked\n");
@@ -591,9 +592,9 @@ static ssize_t
show_radios_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!(fujitsu_hotkey->rfkill_supported & 0x20))
+ if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
return sprintf(buf, "unknown\n");
- if (fujitsu_hotkey->rfkill_state & 0x20)
+ if (fujitsu_laptop->flags_state & FLAG_RFKILL)
return sprintf(buf, "on\n");
else
return sprintf(buf, "killed\n");
@@ -607,7 +608,7 @@ static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
-static struct attribute *fujitsupf_attributes[] = {
+static struct attribute *fujitsu_pf_attributes[] = {
&dev_attr_brightness_changed.attr,
&dev_attr_max_brightness.attr,
&dev_attr_lcd_level.attr,
@@ -617,11 +618,11 @@ static struct attribute *fujitsupf_attributes[] = {
NULL
};
-static struct attribute_group fujitsupf_attribute_group = {
- .attrs = fujitsupf_attributes
+static struct attribute_group fujitsu_pf_attribute_group = {
+ .attrs = fujitsu_pf_attributes
};
-static struct platform_driver fujitsupf_driver = {
+static struct platform_driver fujitsu_pf_driver = {
.driver = {
.name = "fujitsu-laptop",
}
@@ -630,39 +631,30 @@ static struct platform_driver fujitsupf_driver = {
static void __init dmi_check_cb_common(const struct dmi_system_id *id)
{
pr_info("Identified laptop model '%s'\n", id->ident);
- if (use_alt_lcd_levels == -1) {
- if (acpi_has_method(NULL,
- "\\_SB.PCI0.LPCB.FJEX.SBL2"))
- use_alt_lcd_levels = 1;
- else
- use_alt_lcd_levels = 0;
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
- "%i\n", use_alt_lcd_levels);
- }
}
static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
- fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
- fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
+ fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
+ fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
return 1;
}
static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
- fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
- fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
+ fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
+ fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
return 1;
}
static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
- fujitsu->keycode1 = KEY_HELP; /* "Support" */
- fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
- fujitsu->keycode4 = KEY_WWW; /* "Internet" */
+ fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */
+ fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
+ fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */
return 1;
}
@@ -693,7 +685,7 @@ static const struct dmi_system_id fujitsu_dmi_table[] __initconst = {
/* ACPI device for LCD brightness control */
-static int acpi_fujitsu_add(struct acpi_device *device)
+static int acpi_fujitsu_bl_add(struct acpi_device *device)
{
int state = 0;
struct input_dev *input;
@@ -702,22 +694,22 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (!device)
return -EINVAL;
- fujitsu->acpi_handle = device->handle;
- sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
+ fujitsu_bl->acpi_handle = device->handle;
+ sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- device->driver_data = fujitsu;
+ device->driver_data = fujitsu_bl;
- fujitsu->input = input = input_allocate_device();
+ fujitsu_bl->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_stop;
}
- snprintf(fujitsu->phys, sizeof(fujitsu->phys),
+ snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
"%s/video/input0", acpi_device_hid(device));
input->name = acpi_device_name(device);
- input->phys = fujitsu->phys;
+ input->phys = fujitsu_bl->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &device->dev;
@@ -730,7 +722,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
+ error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
@@ -740,7 +732,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- fujitsu->dev = device;
+ fujitsu_bl->dev = device;
if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -750,6 +742,15 @@ static int acpi_fujitsu_add(struct acpi_device *device)
pr_err("_INI Method failed\n");
}
+ if (use_alt_lcd_levels == -1) {
+ if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))
+ use_alt_lcd_levels = 1;
+ else
+ use_alt_lcd_levels = 0;
+ vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",
+ use_alt_lcd_levels);
+ }
+
/* do config (detect defaults) */
use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
@@ -758,7 +759,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
use_alt_lcd_levels, disable_brightness_adjust);
if (get_max_brightness() <= 0)
- fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
+ fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
get_lcd_level();
return 0;
@@ -772,38 +773,38 @@ err_stop:
return error;
}
-static int acpi_fujitsu_remove(struct acpi_device *device)
+static int acpi_fujitsu_bl_remove(struct acpi_device *device)
{
- struct fujitsu_t *fujitsu = acpi_driver_data(device);
- struct input_dev *input = fujitsu->input;
+ struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
+ struct input_dev *input = fujitsu_bl->input;
input_unregister_device(input);
- fujitsu->acpi_handle = NULL;
+ fujitsu_bl->acpi_handle = NULL;
return 0;
}
/* Brightness notify */
-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
+static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
int keycode;
int oldb, newb;
- input = fujitsu->input;
+ input = fujitsu_bl->input;
switch (event) {
case ACPI_FUJITSU_NOTIFY_CODE1:
keycode = 0;
- oldb = fujitsu->brightness_level;
+ oldb = fujitsu_bl->brightness_level;
get_lcd_level();
- newb = fujitsu->brightness_level;
+ newb = fujitsu_bl->brightness_level;
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"brightness button event [%i -> %i (%i)]\n",
- oldb, newb, fujitsu->brightness_changed);
+ oldb, newb, fujitsu_bl->brightness_changed);
if (oldb < newb) {
if (disable_brightness_adjust != 1) {
@@ -840,7 +841,7 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
/* ACPI device for hotkey handling */
-static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
+static int acpi_fujitsu_laptop_add(struct acpi_device *device)
{
int result = 0;
int state = 0;
@@ -851,42 +852,42 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (!device)
return -EINVAL;
- fujitsu_hotkey->acpi_handle = device->handle;
+ fujitsu_laptop->acpi_handle = device->handle;
sprintf(acpi_device_name(device), "%s",
- ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
+ ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- device->driver_data = fujitsu_hotkey;
+ device->driver_data = fujitsu_laptop;
/* kfifo */
- spin_lock_init(&fujitsu_hotkey->fifo_lock);
- error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
+ spin_lock_init(&fujitsu_laptop->fifo_lock);
+ error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),
GFP_KERNEL);
if (error) {
pr_err("kfifo_alloc failed\n");
goto err_stop;
}
- fujitsu_hotkey->input = input = input_allocate_device();
+ fujitsu_laptop->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_free_fifo;
}
- snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
+ snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
"%s/video/input0", acpi_device_hid(device));
input->name = acpi_device_name(device);
- input->phys = fujitsu_hotkey->phys;
+ input->phys = fujitsu_laptop->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &device->dev;
set_bit(EV_KEY, input->evbit);
- set_bit(fujitsu->keycode1, input->keybit);
- set_bit(fujitsu->keycode2, input->keybit);
- set_bit(fujitsu->keycode3, input->keybit);
- set_bit(fujitsu->keycode4, input->keybit);
- set_bit(fujitsu->keycode5, input->keybit);
+ set_bit(fujitsu_bl->keycode1, input->keybit);
+ set_bit(fujitsu_bl->keycode2, input->keybit);
+ set_bit(fujitsu_bl->keycode3, input->keybit);
+ set_bit(fujitsu_bl->keycode4, input->keybit);
+ set_bit(fujitsu_bl->keycode5, input->keybit);
set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
set_bit(KEY_UNKNOWN, input->keybit);
@@ -894,7 +895,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
+ error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
@@ -904,7 +905,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- fujitsu_hotkey->dev = device;
+ fujitsu_laptop->dev = device;
if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -920,27 +921,27 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
; /* No action, result is discarded */
vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
- fujitsu_hotkey->rfkill_supported =
- call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0);
+ fujitsu_laptop->flags_supported =
+ call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);
/* Make sure our bitmask of supported functions is cleared if the
RFKILL function block is not implemented, like on the S7020. */
- if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD)
- fujitsu_hotkey->rfkill_supported = 0;
+ if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)
+ fujitsu_laptop->flags_supported = 0;
- if (fujitsu_hotkey->rfkill_supported)
- fujitsu_hotkey->rfkill_state =
- call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+ if (fujitsu_laptop->flags_supported)
+ fujitsu_laptop->flags_state =
+ call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
/* Suspect this is a keymap of the application panel, print it */
pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&logolamp_led);
if (result == 0) {
- fujitsu_hotkey->logolamp_registered = 1;
+ fujitsu_laptop->logolamp_registered = 1;
} else {
pr_err("Could not register LED handler for logo lamp, error %i\n",
result);
@@ -949,10 +950,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
(call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&kblamps_led);
if (result == 0) {
- fujitsu_hotkey->kblamps_registered = 1;
+ fujitsu_laptop->kblamps_registered = 1;
} else {
pr_err("Could not register LED handler for keyboard lamps, error %i\n",
result);
@@ -966,10 +967,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
* that an RF LED is present.
*/
if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&radio_led);
if (result == 0) {
- fujitsu_hotkey->radio_led_registered = 1;
+ fujitsu_laptop->radio_led_registered = 1;
} else {
pr_err("Could not register LED handler for radio LED, error %i\n",
result);
@@ -983,10 +984,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
*/
if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
(call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&eco_led);
if (result == 0) {
- fujitsu_hotkey->eco_led_registered = 1;
+ fujitsu_laptop->eco_led_registered = 1;
} else {
pr_err("Could not register LED handler for eco LED, error %i\n",
result);
@@ -1002,47 +1003,47 @@ err_unregister_input_dev:
err_free_input_dev:
input_free_device(input);
err_free_fifo:
- kfifo_free(&fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_laptop->fifo);
err_stop:
return error;
}
-static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
+static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
- struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
- struct input_dev *input = fujitsu_hotkey->input;
+ struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
+ struct input_dev *input = fujitsu_laptop->input;
#if IS_ENABLED(CONFIG_LEDS_CLASS)
- if (fujitsu_hotkey->logolamp_registered)
+ if (fujitsu_laptop->logolamp_registered)
led_classdev_unregister(&logolamp_led);
- if (fujitsu_hotkey->kblamps_registered)
+ if (fujitsu_laptop->kblamps_registered)
led_classdev_unregister(&kblamps_led);
- if (fujitsu_hotkey->radio_led_registered)
+ if (fujitsu_laptop->radio_led_registered)
led_classdev_unregister(&radio_led);
- if (fujitsu_hotkey->eco_led_registered)
+ if (fujitsu_laptop->eco_led_registered)
led_classdev_unregister(&eco_led);
#endif
input_unregister_device(input);
- kfifo_free(&fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_laptop->fifo);
- fujitsu_hotkey->acpi_handle = NULL;
+ fujitsu_laptop->acpi_handle = NULL;
return 0;
}
-static void acpi_fujitsu_hotkey_press(int keycode)
+static void acpi_fujitsu_laptop_press(int keycode)
{
- struct input_dev *input = fujitsu_hotkey->input;
+ struct input_dev *input = fujitsu_laptop->input;
int status;
- status = kfifo_in_locked(&fujitsu_hotkey->fifo,
+ status = kfifo_in_locked(&fujitsu_laptop->fifo,
(unsigned char *)&keycode, sizeof(keycode),
- &fujitsu_hotkey->fifo_lock);
+ &fujitsu_laptop->fifo_lock);
if (status != sizeof(keycode)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"Could not push keycode [0x%x]\n", keycode);
@@ -1054,16 +1055,16 @@ static void acpi_fujitsu_hotkey_press(int keycode)
"Push keycode into ringbuffer [%d]\n", keycode);
}
-static void acpi_fujitsu_hotkey_release(void)
+static void acpi_fujitsu_laptop_release(void)
{
- struct input_dev *input = fujitsu_hotkey->input;
+ struct input_dev *input = fujitsu_laptop->input;
int keycode, status;
while (true) {
- status = kfifo_out_locked(&fujitsu_hotkey->fifo,
+ status = kfifo_out_locked(&fujitsu_laptop->fifo,
(unsigned char *)&keycode,
sizeof(keycode),
- &fujitsu_hotkey->fifo_lock);
+ &fujitsu_laptop->fifo_lock);
if (status != sizeof(keycode))
return;
input_report_key(input, keycode, 0);
@@ -1073,14 +1074,14 @@ static void acpi_fujitsu_hotkey_release(void)
}
}
-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
+static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
int keycode;
unsigned int irb = 1;
int i;
- input = fujitsu_hotkey->input;
+ input = fujitsu_laptop->input;
if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
keycode = KEY_UNKNOWN;
@@ -1093,9 +1094,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
return;
}
- if (fujitsu_hotkey->rfkill_supported)
- fujitsu_hotkey->rfkill_state =
- call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+ if (fujitsu_laptop->flags_supported)
+ fujitsu_laptop->flags_state =
+ call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
i = 0;
while ((irb =
@@ -1103,19 +1104,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
&& (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
switch (irb & 0x4ff) {
case KEY1_CODE:
- keycode = fujitsu->keycode1;
+ keycode = fujitsu_bl->keycode1;
break;
case KEY2_CODE:
- keycode = fujitsu->keycode2;
+ keycode = fujitsu_bl->keycode2;
break;
case KEY3_CODE:
- keycode = fujitsu->keycode3;
+ keycode = fujitsu_bl->keycode3;
break;
case KEY4_CODE:
- keycode = fujitsu->keycode4;
+ keycode = fujitsu_bl->keycode4;
break;
case KEY5_CODE:
- keycode = fujitsu->keycode5;
+ keycode = fujitsu_bl->keycode5;
break;
case 0:
keycode = 0;
@@ -1128,17 +1129,17 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
}
if (keycode > 0)
- acpi_fujitsu_hotkey_press(keycode);
+ acpi_fujitsu_laptop_press(keycode);
else if (keycode == 0)
- acpi_fujitsu_hotkey_release();
+ acpi_fujitsu_laptop_release();
}
/* On some models (first seen on the Skylake-based Lifebook
* E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
- * handled in software; its state is queried using FUNC_RFKILL
+ * handled in software; its state is queried using FUNC_FLAGS
*/
- if ((fujitsu_hotkey->rfkill_supported & BIT(26)) &&
- (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) {
+ if ((fujitsu_laptop->flags_supported & BIT(26)) &&
+ (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {
keycode = KEY_TOUCHPAD_TOGGLE;
input_report_key(input, keycode, 1);
input_sync(input);
@@ -1150,83 +1151,81 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
/* Initialization */
-static const struct acpi_device_id fujitsu_device_ids[] = {
- {ACPI_FUJITSU_HID, 0},
+static const struct acpi_device_id fujitsu_bl_device_ids[] = {
+ {ACPI_FUJITSU_BL_HID, 0},
{"", 0},
};
-static struct acpi_driver acpi_fujitsu_driver = {
- .name = ACPI_FUJITSU_DRIVER_NAME,
+static struct acpi_driver acpi_fujitsu_bl_driver = {
+ .name = ACPI_FUJITSU_BL_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
- .ids = fujitsu_device_ids,
+ .ids = fujitsu_bl_device_ids,
.ops = {
- .add = acpi_fujitsu_add,
- .remove = acpi_fujitsu_remove,
- .notify = acpi_fujitsu_notify,
+ .add = acpi_fujitsu_bl_add,
+ .remove = acpi_fujitsu_bl_remove,
+ .notify = acpi_fujitsu_bl_notify,
},
};
-static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {
- {ACPI_FUJITSU_HOTKEY_HID, 0},
+static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
+ {ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0},
};
-static struct acpi_driver acpi_fujitsu_hotkey_driver = {
- .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,
+static struct acpi_driver acpi_fujitsu_laptop_driver = {
+ .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
- .ids = fujitsu_hotkey_device_ids,
+ .ids = fujitsu_laptop_device_ids,
.ops = {
- .add = acpi_fujitsu_hotkey_add,
- .remove = acpi_fujitsu_hotkey_remove,
- .notify = acpi_fujitsu_hotkey_notify,
+ .add = acpi_fujitsu_laptop_add,
+ .remove = acpi_fujitsu_laptop_remove,
+ .notify = acpi_fujitsu_laptop_notify,
},
};
static const struct acpi_device_id fujitsu_ids[] __used = {
- {ACPI_FUJITSU_HID, 0},
- {ACPI_FUJITSU_HOTKEY_HID, 0},
+ {ACPI_FUJITSU_BL_HID, 0},
+ {ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
static int __init fujitsu_init(void)
{
- int ret, result, max_brightness;
+ int ret, max_brightness;
if (acpi_disabled)
return -ENODEV;
- fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
- if (!fujitsu)
+ fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
+ if (!fujitsu_bl)
return -ENOMEM;
- fujitsu->keycode1 = KEY_PROG1;
- fujitsu->keycode2 = KEY_PROG2;
- fujitsu->keycode3 = KEY_PROG3;
- fujitsu->keycode4 = KEY_PROG4;
- fujitsu->keycode5 = KEY_RFKILL;
+ fujitsu_bl->keycode1 = KEY_PROG1;
+ fujitsu_bl->keycode2 = KEY_PROG2;
+ fujitsu_bl->keycode3 = KEY_PROG3;
+ fujitsu_bl->keycode4 = KEY_PROG4;
+ fujitsu_bl->keycode5 = KEY_RFKILL;
dmi_check_system(fujitsu_dmi_table);
- result = acpi_bus_register_driver(&acpi_fujitsu_driver);
- if (result < 0) {
- ret = -ENODEV;
+ ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
+ if (ret)
goto fail_acpi;
- }
/* Register platform stuff */
- fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
- if (!fujitsu->pf_device) {
+ fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+ if (!fujitsu_bl->pf_device) {
ret = -ENOMEM;
goto fail_platform_driver;
}
- ret = platform_device_add(fujitsu->pf_device);
+ ret = platform_device_add(fujitsu_bl->pf_device);
if (ret)
goto fail_platform_device1;
ret =
- sysfs_create_group(&fujitsu->pf_device->dev.kobj,
- &fujitsupf_attribute_group);
+ sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
if (ret)
goto fail_platform_device2;
@@ -1236,90 +1235,88 @@ static int __init fujitsu_init(void)
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
- max_brightness = fujitsu->max_brightness;
+ max_brightness = fujitsu_bl->max_brightness;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max_brightness - 1;
- fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
- NULL, NULL,
- &fujitsubl_ops,
- &props);
- if (IS_ERR(fujitsu->bl_device)) {
- ret = PTR_ERR(fujitsu->bl_device);
- fujitsu->bl_device = NULL;
+ fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",
+ NULL, NULL,
+ &fujitsu_bl_ops,
+ &props);
+ if (IS_ERR(fujitsu_bl->bl_device)) {
+ ret = PTR_ERR(fujitsu_bl->bl_device);
+ fujitsu_bl->bl_device = NULL;
goto fail_sysfs_group;
}
- fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
+ fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;
}
- ret = platform_driver_register(&fujitsupf_driver);
+ ret = platform_driver_register(&fujitsu_pf_driver);
if (ret)
goto fail_backlight;
- /* Register hotkey driver */
+ /* Register laptop driver */
- fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
- if (!fujitsu_hotkey) {
+ fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
+ if (!fujitsu_laptop) {
ret = -ENOMEM;
- goto fail_hotkey;
+ goto fail_laptop;
}
- result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
- if (result < 0) {
- ret = -ENODEV;
- goto fail_hotkey1;
- }
+ ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
+ if (ret)
+ goto fail_laptop1;
/* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
- fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
+ fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
else
- fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
+ fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
}
pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
-fail_hotkey1:
- kfree(fujitsu_hotkey);
-fail_hotkey:
- platform_driver_unregister(&fujitsupf_driver);
+fail_laptop1:
+ kfree(fujitsu_laptop);
+fail_laptop:
+ platform_driver_unregister(&fujitsu_pf_driver);
fail_backlight:
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu_bl->bl_device);
fail_sysfs_group:
- sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
- &fujitsupf_attribute_group);
+ sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
fail_platform_device2:
- platform_device_del(fujitsu->pf_device);
+ platform_device_del(fujitsu_bl->pf_device);
fail_platform_device1:
- platform_device_put(fujitsu->pf_device);
+ platform_device_put(fujitsu_bl->pf_device);
fail_platform_driver:
- acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
fail_acpi:
- kfree(fujitsu);
+ kfree(fujitsu_bl);
return ret;
}
static void __exit fujitsu_cleanup(void)
{
- acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
+ acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
- kfree(fujitsu_hotkey);
+ kfree(fujitsu_laptop);
- platform_driver_unregister(&fujitsupf_driver);
+ platform_driver_unregister(&fujitsu_pf_driver);
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu_bl->bl_device);
- sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
- &fujitsupf_attribute_group);
+ sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
- platform_device_unregister(fujitsu->pf_device);
+ platform_device_unregister(fujitsu_bl->pf_device);
- acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
- kfree(fujitsu);
+ kfree(fujitsu_bl);
pr_info("driver unloaded\n");
}
@@ -1341,7 +1338,3 @@ MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
MODULE_DESCRIPTION("Fujitsu laptop extras support");
MODULE_VERSION(FUJITSU_DRIVER_VERSION);
MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
-MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 230043c1c90f..4bf55b5d78be 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1241,19 +1241,32 @@ config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
- depends on NVME_FC && NVME_TARGET_FC
select CRC_T10DIF
- help
+ ---help---
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
config SCSI_LPFC_DEBUG_FS
bool "Emulex LightPulse Fibre Channel debugfs Support"
depends on SCSI_LPFC && DEBUG_FS
- help
+ ---help---
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
+config LPFC_NVME_INITIATOR
+ bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
+ depends on SCSI_LPFC && NVME_FC
+ ---help---
+ This enables NVME Initiator support in the Emulex lpfc driver.
+
+config LPFC_NVME_TARGET
+ bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
+ depends on SCSI_LPFC && NVME_TARGET_FC
+ ---help---
+ This enables NVME Target support in the Emulex lpfc driver.
+ Target mode must still be enabled on a per-adapter
+ basis by module parameters.
+
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2e5338dec621..7b0410e0f569 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -468,7 +468,7 @@ err_out:
return -1;
err_blink:
- return (status > 16) & 0xFF;
+ return (status >> 16) & 0xFF;
}
static inline u32 aac_get_vector(struct aac_dev *dev)
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 109e2c99e6c1..95d8f25cbcca 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
* does not disable its parity logic prior to
* the start of the reset. This may cause a
* parity error to be detected and thus a
- * spurious SERR or PERR assertion. Disble
+ * spurious SERR or PERR assertion. Disable
* PERR and SERR responses during the CHIPRST.
*/
mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 07c08ce68d70..894b1e3ebd56 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
- if (!list_empty(&task->running))
+ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&task->running)) {
+ pr_debug_once("%s while task on list", __func__);
list_del_init(&task->running);
+ }
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->task == task)
conn->task = NULL;
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
+ spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
}
/* process pending command queue */
@@ -1536,19 +1548,24 @@ check_mgmt:
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
+ spin_unlock_bh(&conn->taskqueuelock);
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
* be sent to avoid starvation
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1578,12 +1596,15 @@ check_mgmt:
conn->task = task;
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
+ spin_unlock_bh(&conn->taskqueuelock);
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
+ spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
+ spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
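The pattern the new taskqueuelock enforces above is worth spelling out: the lock guards only list membership, so iscsi_data_xmit() drops it around each transmit (which may block or take other locks) and retakes it before touching the lists again. A minimal sketch of that drop/retake loop (names are illustrative, not libiscsi's actual code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head running;	/* linked into the queue */
};

static void drain_queue(spinlock_t *lock, struct list_head *queue,
			void (*xmit)(struct work_item *))
{
	struct work_item *w;

	spin_lock_bh(lock);
	while (!list_empty(queue)) {
		w = list_first_entry(queue, struct work_item, running);
		list_del_init(&w->running);
		/* never call out to xmit with the list lock held */
		spin_unlock_bh(lock);
		xmit(w);
		spin_lock_bh(lock);
	}
	spin_unlock_bh(lock);
}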
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 0bba2e30b4f0..257bbdd0f0b8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
#define FC_MAX_ADPTMSG 64
#define MAX_HBAEVT 32
+#define MAX_HBAS_NO_RESET 16
/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS 2
/* lpfc wait event data ready flag */
-#define LPFC_DATA_READY (1<<0)
+#define LPFC_DATA_READY 0 /* bit 0 */
/* queue dump line buffer size */
#define LPFC_LBUF_SZ 128
@@ -692,6 +693,7 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
+#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c783ef7f260..5c3be3e6f5e2 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3010,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
+int lpfc_no_hba_reset_cnt;
+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
+
LPFC_ATTR(sli_mode, 0, 0, 3,
"SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
@@ -4451,7 +4457,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
- for (i = 0; i < phba->io_channel_irqs; i++)
+
+ for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, i);
return strlen(buf);
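The new lpfc_no_hba_reset module parameter takes an array of WWPNs (e.g. lpfc.lpfc_no_hba_reset=0x10000090fa942155), so a consumer simply compares an adapter's WWPN against each populated slot. A hedged sketch of such a check (the helper name is illustrative; the driver's real test lives elsewhere in lpfc):

#include <linux/types.h>

extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];

/* Returns true if this HBA's WWPN was listed in lpfc_no_hba_reset. */
static bool sketch_hba_reset_blocked(unsigned long wwpn)
{
	int i;

	for (i = 0; i < lpfc_no_hba_reset_cnt; i++)
		if (lpfc_no_hba_reset[i] == wwpn)
			return true;
	return false;
}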
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 843dd73004da..54e6ac42fbcd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -384,7 +384,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
@@ -554,3 +554,5 @@ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *abts_cmpl);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
+extern int lpfc_no_hba_reset_cnt;
+extern unsigned long lpfc_no_hba_reset[];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c22bb3f887e1..d3e9af983015 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -939,8 +939,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"FC4 x%08x, Data: x%08x x%08x\n",
ndlp, did, ndlp->nlp_fc4_type,
FC_TYPE_FCP, FC_TYPE_NVME);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
}
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
} else
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9f4798e9d938..913eed822cb8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -3653,17 +3653,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_cq;
goto pass_check;
}
- /* NVME LS complete queue */
- if (phba->sli4_hba.nvmels_cq &&
- phba->sli4_hba.nvmels_cq->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvmels_cq, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.nvmels_cq;
- goto pass_check;
- }
/* FCP complete queue */
if (phba->sli4_hba.fcp_cq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -3738,17 +3727,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_wq;
goto pass_check;
}
- /* NVME LS work queue */
- if (phba->sli4_hba.nvmels_wq &&
- phba->sli4_hba.nvmels_wq->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvmels_wq, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.nvmels_wq;
- goto pass_check;
- }
/* FCP work queue */
if (phba->sli4_hba.fcp_wq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2d26440e6f2f..d9c61d030034 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5177,15 +5177,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
static uint32_t
lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
- struct lpfc_hba *phba)
+ struct lpfc_vport *vport)
{
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
- memcpy(desc->port_names.wwnn, phba->wwnn,
+ memcpy(desc->port_names.wwnn, &vport->fc_nodename,
sizeof(desc->port_names.wwnn));
- memcpy(desc->port_names.wwpn, phba->wwpn,
+ memcpy(desc->port_names.wwpn, &vport->fc_portname,
sizeof(desc->port_names.wwpn));
desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5279,7 +5279,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
(len + pcmd), &rdp_context->link_stat);
len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
- (len + pcmd), phba);
+ (len + pcmd), vport);
len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
(len + pcmd), vport, ndlp);
len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
@@ -8371,11 +8371,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- if (vport->port_type == LPFC_PHYSICAL_PORT
- && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
- lpfc_issue_init_vfi(vport);
- else
+ if (mb->mbxStatus == MBX_NOT_FINISHED)
+ break;
+ if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+ !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_issue_init_vfi(vport);
+ else
+ lpfc_initial_flogi(vport);
+ } else {
lpfc_initial_fdisc(vport);
+ }
break;
}
} else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 194a14d5f8a9..180b072beef6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -313,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(vport->load_flag & FC_UNLOADING) &&
- !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
@@ -641,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
+ if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
+ lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -2173,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
bool select_new_fcf;
int rc;
@@ -4020,9 +4021,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rdata = rport->dd_data;
/* break the link before dropping the ref */
ndlp->rport = NULL;
- if (rdata && rdata->pnode == ndlp)
- lpfc_nlp_put(ndlp);
- rdata->pnode = NULL;
+ if (rdata) {
+ if (rdata->pnode == ndlp)
+ lpfc_nlp_put(ndlp);
+ rdata->pnode = NULL;
+ }
/* drop reference for earlier registration */
put_device(&rport->dev);
}
@@ -4344,9 +4347,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- init_timer(&ndlp->nlp_delayfunc);
- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+ (unsigned long)ndlp);
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
@@ -4606,9 +4608,9 @@ lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
pring = qp->pring;
if (!pring)
continue;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
}
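The hunk above drops the _irq suffix on the inner ring_lock because the enclosing walk already holds hbalock with interrupts disabled; a nested spin_unlock_irq() would re-enable interrupts too early. A minimal sketch of the rule, with generic names:

	spin_lock_irq(&outer_lock);		/* IRQs off for the whole walk */
	list_for_each_entry(qp, &qp_list, list) {
		spin_lock(&qp->ring_lock);	/* inner lock: plain variant */
		/* ... collect entries onto a private dequeue list ... */
		spin_unlock(&qp->ring_lock);
	}
	spin_unlock_irq(&outer_lock);		/* IRQs back on once, here */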
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index cfdb068a3bfc..15277705cb6b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1001,7 +1001,7 @@ struct eq_delay_info {
uint32_t phase;
uint32_t delay_multi;
};
-#define LPFC_MAX_EQ_DELAY 8
+#define LPFC_MAX_EQ_DELAY_EQID_CNT 8
struct sgl_page_pairs {
uint32_t sgl_pg0_addr_lo;
@@ -1070,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay {
union {
struct {
uint32_t num_eq;
- struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
+ struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
} request;
struct {
uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ee429d773f3..2697d49da4d7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3555,6 +3555,44 @@ out_free_mem:
return rc;
}
+static uint64_t
+lpfc_get_wwpn(struct lpfc_hba *phba)
+{
+ uint64_t wwn;
+ int rc;
+ LPFC_MBOXQ_t *mboxq;
+ MAILBOX_t *mb;
+
+
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq)
+ return (uint64_t)-1;
+
+ /* First get WWN of HBA instance */
+ lpfc_read_nv(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6019 Mailbox failed , mbxCmd x%x "
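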
+ "READ_NV, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return (uint64_t) -1;
+ }
+ mb = &mboxq->u.mb;
+ memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
+ /* wwn is WWPN of HBA instance */
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return be64_to_cpu(wwn);
+ else
+ return (((wwn & 0xffffffff00000000) >> 32) |
+ ((wwn & 0x00000000ffffffff) << 32));
+
+}
+
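The else branch of lpfc_get_wwpn() (SLI-3) swaps the two 32-bit halves of the raw READ_NV portname, while SLI-4 parts return it big-endian and go through be64_to_cpu(). A worked sketch of the swap, with a hypothetical value:

	/* 0x2233445510000000 -> 0x1000000022334455 */
	static u64 wwn_word_swap(u64 raw)
	{
		return ((raw & 0xffffffff00000000ULL) >> 32) |
		       ((raw & 0x00000000ffffffffULL) << 32);
	}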
/**
* lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
* @phba: pointer to lpfc hba data structure.
@@ -3676,17 +3714,32 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
int error = 0;
+ int i;
+ uint64_t wwn;
+ bool use_no_reset_hba = false;
+
+ wwn = lpfc_get_wwpn(phba);
+
+ for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
+ if (wwn == lpfc_no_hba_reset[i]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6020 Setting use_no_reset port=%llx\n",
+ wwn);
+ use_no_reset_hba = true;
+ break;
+ }
+ }
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
} else {
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (!use_no_reset_hba)
shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
else
- shost = scsi_host_alloc(&lpfc_template_s3,
+ shost = scsi_host_alloc(&lpfc_template_no_hr,
sizeof(struct lpfc_vport));
}
} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
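lpfc_no_hba_reset[] and lpfc_no_hba_reset_cnt are not defined in this hunk; from the usage above they behave like a module-parameter array of WWPNs whose ports get the no-host-reset template. A hedged sketch of how such a knob is typically declared (names and the array bound are assumptions, not taken from this patch):

	static unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET];
	static int lpfc_no_hba_reset_cnt;
	module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
	MODULE_PARM_DESC(lpfc_no_hba_reset,
			 "WWPNs of HBAs that should not be offered host reset");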
@@ -3734,17 +3787,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
- init_timer(&vport->fc_disctmo);
- vport->fc_disctmo.function = lpfc_disc_timeout;
- vport->fc_disctmo.data = (unsigned long)vport;
+ setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
+ (unsigned long)vport);
- init_timer(&vport->els_tmofunc);
- vport->els_tmofunc.function = lpfc_els_timeout;
- vport->els_tmofunc.data = (unsigned long)vport;
+ setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
+ (unsigned long)vport);
- init_timer(&vport->delayed_disc_tmo);
- vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
- vport->delayed_disc_tmo.data = (unsigned long)vport;
+ setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
+ (unsigned long)vport);
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
@@ -5406,21 +5456,15 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->luns);
/* MBOX heartbeat timer */
- init_timer(&psli->mbox_tmo);
- psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long) phba;
+ setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
/* Fabric block timer */
- init_timer(&phba->fabric_block_timer);
- phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
- phba->fabric_block_timer.data = (unsigned long) phba;
+ setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
+ (unsigned long)phba);
/* EA polling mode timer */
- init_timer(&phba->eratt_poll);
- phba->eratt_poll.function = lpfc_poll_eratt;
- phba->eratt_poll.data = (unsigned long) phba;
+ setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
+ (unsigned long)phba);
/* Heartbeat timer */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
+ setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
return 0;
}
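These hunks mechanically replace the three-statement init_timer() idiom with setup_timer(), the preferred helper in the v4.11 timeframe (before timer_setup()/from_timer() existed). The conversion in isolation, with placeholder names:

	/* before */
	init_timer(&ctx->poll_timer);
	ctx->poll_timer.function = demo_timeout;
	ctx->poll_timer.data = (unsigned long)ctx;

	/* after */
	setup_timer(&ctx->poll_timer, demo_timeout, (unsigned long)ctx);

demo_timeout keeps the classic void (*)(unsigned long) signature and recovers its context by casting .data back to the owning structure.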
@@ -5446,9 +5490,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
*/
/* FCP polling mode timer */
- init_timer(&phba->fcp_poll_timer);
- phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long) phba;
+ setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
+ (unsigned long)phba);
/* Host attention work mask setup */
phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5482,7 +5525,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates the configured values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
if (phba->cfg_enable_bg) {
@@ -5617,14 +5661,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* Initialize timers used by driver
*/
- init_timer(&phba->rrq_tmr);
- phba->rrq_tmr.function = lpfc_rrq_timeout;
- phba->rrq_tmr.data = (unsigned long)phba;
+ setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
/* FCF rediscover timer */
- init_timer(&phba->fcf.redisc_wait);
- phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
- phba->fcf.redisc_wait.data = (unsigned long)phba;
+ setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
+ (unsigned long)phba);
/*
* Control structure for handling external multi-buffer mailbox
@@ -5706,6 +5747,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates with the updated values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5736,6 +5778,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the Abort nvme buffer list used by driver */
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ /* Fast-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
@@ -8712,12 +8756,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
}
- /*
- * Configure EQ delay multipier for interrupt coalescing using
- * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
- */
- for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
+ for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, qidx);
+
return 0;
out_destroy:
@@ -8973,6 +9014,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Pending NVME XRI abort events */
+ list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+ &cqelist);
+ }
/* Pending async events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
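Adding the NVME list here follows the splice-under-lock drain this function already uses for the other event lists: each pending list is moved wholesale onto a private list, then the events are released outside the critical section. Generic shape of the pattern:

	LIST_HEAD(tmp);

	spin_lock_irq(&dev->lock);
	list_splice_init(&dev->pending, &tmp);	/* O(1), leaves pending empty */
	spin_unlock_irq(&dev->lock);

	list_for_each_entry_safe(ev, next, &tmp, list) {
		list_del(&ev->list);
		free_event(ev);			/* hypothetical release helper */
	}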
@@ -10400,12 +10446,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
fc_remove_host(shost);
scsi_remove_host(shost);
- /* Perform ndlp cleanup on the physical port. The nvme and nvmet
- * localports are destroyed after to cleanup all transport memory.
- */
lpfc_cleanup(vport);
- lpfc_nvmet_destroy_targetport(phba);
- lpfc_nvme_destroy_localport(vport);
/*
* Bring down the SLI Layer. This step disable all interrupts,
@@ -12018,6 +12059,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
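Pointing .shutdown at the existing remove handler works because both pci_driver callbacks share the void (*)(struct pci_dev *) signature; it quiesces the HBA's interrupts and DMA on reboot or kexec, not just on driver unbind. The same wiring in a generic driver:

	static struct pci_driver demo_driver = {
		.name     = "demo",
		.id_table = demo_ids,
		.probe    = demo_probe,
		.remove   = demo_remove,
		.shutdown = demo_remove,	/* reuse teardown for shutdown */
	};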
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index c61d8d692ede..5986c7957199 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -646,7 +646,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
}
dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
- dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
if (!dma_buf->iocbq) {
kfree(dma_buf->context);
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
@@ -658,6 +657,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
"2621 Ran out of nvmet iocb/WQEs\n");
return NULL;
}
+ dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
nvmewqe = dma_buf->iocbq;
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
/* Initialize WQE */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 609a908ea9db..0a4c19081409 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -316,7 +316,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
- bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
/* Word 6 */
@@ -620,15 +620,15 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
* Embed the payload in the last half of the WQE
* WQE words 16-30 get the NVME CMD IU payload
*
- * WQE Word 16 is already setup with flags
- * WQE words 17-19 get payload Words 2-4
+ * WQE words 16-19 get payload Words 1-4
* WQE words 20-21 get payload Words 6-7
* WQE words 22-29 get payload Words 16-23
*/
- wptr = &wqe->words[17]; /* WQE ptr */
+ wptr = &wqe->words[16]; /* WQE ptr */
dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
- dptr += 2; /* Skip Words 0-1 in payload */
+ dptr++; /* Skip Word 0 in payload */
+ *wptr++ = *dptr++; /* Word 1 */
*wptr++ = *dptr++; /* Word 2 */
*wptr++ = *dptr++; /* Word 3 */
*wptr++ = *dptr++; /* Word 4 */
@@ -978,9 +978,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_WRITE_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_WRITE;
-
phba->fc4NvmeOutputRequests++;
} else {
/* Word 7 */
@@ -1002,9 +999,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_READ_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_READ;
-
phba->fc4NvmeInputRequests++;
}
} else {
@@ -1026,9 +1020,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 11 */
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_CMD;
-
phba->fc4NvmeControlRequests++;
}
/*
@@ -1286,6 +1277,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
pnvme_fcreq->private = (void *)lpfc_ncmd;
lpfc_ncmd->nvmeCmd = pnvme_fcreq;
lpfc_ncmd->nrport = rport;
+ lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->start_time = jiffies;
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1319,7 +1311,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
lpfc_ncmd->cur_iocbq.sli4_xritag);
- ret = -EINVAL;
+ ret = -EBUSY;
goto out_free_nvme_buf;
}
@@ -1821,10 +1813,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
pdma_phys_sgl1, cur_xritag);
if (status) {
/* failure, put on abort nvme list */
- lpfc_ncmd->exch_busy = 1;
+ lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
- lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
@@ -1854,10 +1846,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
struct lpfc_nvme_buf, list);
if (status) {
/* failure, put on abort nvme list */
- lpfc_ncmd->exch_busy = 1;
+ lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
- lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
@@ -2099,7 +2091,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
unsigned long iflag = 0;
lpfc_ncmd->nonsg_phys = 0;
- if (lpfc_ncmd->exch_busy) {
+ if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
lpfc_ncmd->nvmeCmd = NULL;
@@ -2135,11 +2127,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
+ int ret = 0;
struct lpfc_hba *phba = vport->phba;
struct nvme_fc_port_info nfcp_info;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- int len, ret = 0;
+ int len;
/* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for.
@@ -2156,8 +2149,12 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
*/
+#ifdef CONFIG_LPFC_NVME_INITIATOR
ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
&vport->phba->pcidev->dev, &localport);
+#else
+ ret = -ENOMEM;
+#endif
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
@@ -2173,10 +2170,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
INIT_LIST_HEAD(&lport->rport_list);
vport->nvmei_support = 1;
+ len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
+ vport->phba->total_nvme_bufs += len;
}
- len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
- vport->phba->total_nvme_bufs += len;
return ret;
}
@@ -2193,6 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
+#ifdef CONFIG_LPFC_NVME_INITIATOR
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2208,7 +2206,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
localport);
-
list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
/* The last node ref has to get released now before the rport
* private memory area is released by the transport.
@@ -2222,6 +2219,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
"6008 rport fail destroy %x\n", ret);
wait_for_completion_timeout(&rport->rport_unreg_done, 5);
}
+
/* lport's rport list is clear. Unregister
* lport and release resources.
*/
@@ -2245,6 +2243,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
"Failed, status x%x\n",
ret);
}
+#endif
}
void
@@ -2275,6 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+#ifdef CONFIG_LPFC_NVME_INITIATOR
int ret = 0;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
@@ -2348,7 +2348,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-
ret = nvme_fc_register_remoteport(localport, &rpinfo,
&remote_port);
if (!ret) {
@@ -2384,6 +2383,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_type, ndlp->nlp_DID, ndlp);
}
return ret;
+#else
+ return 0;
+#endif
}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
@@ -2401,6 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+#ifdef CONFIG_LPFC_NVME_INITIATOR
int ret;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
@@ -2458,7 +2461,61 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return;
input_err:
+#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6168: State error: lport %p, rport%p FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
+
+/**
+ * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvme xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri.
+ **/
+void
+lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct lpfc_nodelist *ndlp;
+ unsigned long iflag = 0;
+ int rrq_empty = 0;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
+ &phba->sli4_hba.lpfc_abts_nvme_buf_list,
+ list) {
+ if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
+ list_del(&lpfc_ncmd->list);
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ spin_unlock(
+ &phba->sli4_hba.abts_nvme_buf_list_lock);
+
+ rrq_empty = list_empty(&phba->active_rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ ndlp = lpfc_ncmd->ndlp;
+ if (ndlp) {
+ lpfc_set_rrq_active(
+ phba, ndlp,
+ lpfc_ncmd->cur_iocbq.sli4_lxritag,
+ rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ if (rrq_empty)
+ lpfc_worker_wake_up(phba);
+ return;
+ }
+ }
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index b2fae5e813f8..1347deb8dd6c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -57,6 +57,7 @@ struct lpfc_nvme_buf {
struct list_head list;
struct nvmefc_fcp_req *nvmeCmd;
struct lpfc_nvme_rport *nrport;
+ struct lpfc_nodelist *ndlp;
uint32_t timeout;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c421e1738ee9..b7739a554fe0 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -571,6 +571,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6102 Bad state IO x%x aborted\n",
ctxp->oxid);
+ rc = -ENXIO;
goto aerr;
}
@@ -580,6 +581,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6152 FCP Drop IO x%x: Prep\n",
ctxp->oxid);
+ rc = -ENXIO;
goto aerr;
}
@@ -618,8 +620,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->wqeq->hba_wqidx = 0;
nvmewqeq->context2 = NULL;
nvmewqeq->context3 = NULL;
+ rc = -EBUSY;
aerr:
- return -ENXIO;
+ return rc;
}
static void
@@ -668,9 +671,13 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
+#ifdef CONFIG_LPFC_NVME_TARGET
error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
&phba->pcidev->dev,
&phba->targetport);
+#else
+ error = -ENOMEM;
+#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6025 Cannot register NVME targetport "
@@ -731,9 +738,25 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvmet xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVMET aborted xri.
+ **/
+void
+lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ /* TODO: work in progress */
+}
+
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
+#ifdef CONFIG_LPFC_NVME_TARGET
struct lpfc_nvmet_tgtport *tgtp;
if (phba->nvmet_support == 0)
@@ -745,6 +768,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
}
phba->targetport = NULL;
+#endif
}
/**
@@ -764,6 +788,7 @@ static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct hbq_dmabuf *nvmebuf)
{
+#ifdef CONFIG_LPFC_NVME_TARGET
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -844,6 +869,7 @@ dropit:
atomic_inc(&tgtp->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+#endif
}
/**
@@ -865,6 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct rqb_dmabuf *nvmebuf,
uint64_t isr_timestamp)
{
+#ifdef CONFIG_LPFC_NVME_TARGET
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
@@ -955,7 +982,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
+ "6159 FCP Drop IO x%x: err x%x\n",
ctxp->oxid, rc);
dropit:
lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
@@ -970,6 +997,7 @@ dropit:
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
}
+#endif
}
/**
@@ -1114,7 +1142,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
- bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
/* Word 6 */
@@ -1445,7 +1473,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
case NVMET_FCOP_RSP:
/* Words 0 - 2 */
- sgel = &rsp->sg[0];
physaddr = rsp->rspdma;
wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
@@ -1681,8 +1708,8 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6067 %s: Entrypoint: sid %x xri %x\n", __func__,
- sid, xri);
+ "6067 Abort: sid %x xri x%x/x%x\n",
+ sid, xri, ctxp->wqeq->sli4_xritag);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -1693,7 +1720,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
- ndlp->nlp_state);
+ (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
return 0;
@@ -1791,7 +1818,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6160 Drop ABTS - wrong NDLP state x%x.\n",
- ndlp->nlp_state);
+ (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9d6384af9fce..54fd0c81ceaf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5953,12 +5953,13 @@ struct scsi_host_template lpfc_template_nvme = {
.track_queue_depth = 0,
};
-struct scsi_host_template lpfc_template_s3 = {
+struct scsi_host_template lpfc_template_no_hr = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
@@ -6015,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = {
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e43e5e23c24b..1c9fa45df7eb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,3 +1,4 @@
+
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
@@ -952,7 +953,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
start_sglq = sglq;
while (!found) {
if (!sglq)
- return NULL;
+ break;
if (ndlp && ndlp->active_rrqs_xri_bitmap &&
test_bit(sglq->sli4_lxritag,
ndlp->active_rrqs_xri_bitmap)) {
@@ -12213,6 +12214,41 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 NVME abort XRI events.
+ **/
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the nvme xri abort event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the nvme xri abort events */
+ while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support) {
+ lpfc_sli4_nvmet_xri_aborted(phba,
+ &cq_event->cqe.wcqe_axri);
+ } else {
+ lpfc_sli4_nvme_xri_aborted(phba,
+ &cq_event->cqe.wcqe_axri);
+ }
+ /* Free the event processed back to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12709,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+ /* Set the nvme xri abort event flag */
+ phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0603 Invalid work queue CQE subtype (x%x)\n",
- cq->subtype);
+ "0603 Invalid CQ subtype %d: "
+ "%08x %08x %08x %08x\n",
+ cq->subtype, wcqe->word0, wcqe->parameter,
+ wcqe->word2, wcqe->word3);
+ lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -13827,6 +13875,8 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* @startq: The starting FCP EQ to modify
*
* This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
+ * updated in one mailbox command.
*
* The @phba struct is used to send mailbox command to HBA. The @startq
* is used to get the starting FCP EQ to change.
@@ -13879,7 +13929,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
cnt++;
- if (cnt >= LPFC_MAX_EQ_DELAY)
+ if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
break;
}
eq_delay->u.request.num_eq = cnt;
@@ -15185,17 +15235,17 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
drq = drqp[idx];
cq = cqp[idx];
- if (hrq->entry_count != drq->entry_count) {
- status = -EINVAL;
- goto out;
- }
-
/* sanity check on queue memory */
if (!hrq || !drq || !cq) {
status = -ENODEV;
goto out;
}
+ if (hrq->entry_count != drq->entry_count) {
+ status = -EINVAL;
+ goto out;
+ }
+
if (idx == 0) {
bf_set(lpfc_mbx_rq_create_num_pages,
&rq_create->u.request,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 91153c9f6d18..710458cf11d6 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -642,6 +642,7 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
+ struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -794,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 86c6c9b26b82..d4e95e28f4e3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.7"
+#define LPFC_DRIVER_VERSION "11.2.0.10"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 7fe7e6ed595b..8981806fb13f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1442,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
extern struct sas_function_template mpt3sas_transport_functions;
extern struct scsi_transport_template *mpt3sas_transport_template;
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state);
/* trigger data externs */
void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 46e866c36c8a..919ba2bb15f1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev);
+ r = scsi_internal_device_block(sdev, false);
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
"performing a block followed by an unblock\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev);
+ r = scsi_internal_device_block(sdev, false);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
@@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
- unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
@@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
-
- /* In case of bogus fw or device, we could end up having
- * unaligned partial completion. We can force alignment here,
- * then scsi-ml does not need to handle this misbehavior.
- */
- sector_sz = scmd->device->sector_size;
- if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
- xfer_cnt % sector_sz)) {
- sdev_printk(KERN_INFO, scmd->device,
- "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
- xfer_cnt, sector_sz);
- xfer_cnt = round_down(xfer_cnt, sector_sz);
- }
-
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 23bd70628a2f..7d173f48a81e 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -81,14 +81,17 @@ struct qedf_dbg_ctx {
#define QEDF_INFO(pdev, level, fmt, ...) \
qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
## __VA_ARGS__)
-
-extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...);
-extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *, ...);
-extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
+__printf(4, 5)
+void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
u32 line, const char *, ...);
-extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(5, 6)
+void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
u32 info, const char *fmt, ...);
/* GRC Dump related defines */
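Dropping the redundant extern and adding __printf(f, a) marks argument f as the format string and a as the first variadic argument, so the compiler can type-check callers and warn on mismatches like the %d-versus-%lld LUN formats fixed in qedf_io.c below. Applied to a simple logger:

	#include <linux/compiler.h>

	__printf(2, 3)
	static void demo_log(struct demo_ctx *ctx, const char *fmt, ...);

	demo_log(ctx, "lun=%llu\n", (unsigned long long)lun);	/* checked at compile time */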
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 868d423380d1..ed58b9104f58 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -203,7 +203,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "fd_mac=%pM.\n", __func__, mp->fd_mac);
+ "fd_mac=%pM\n", mp->fd_mac);
ether_addr_copy(cvl_mac, mp->fd_mac);
break;
case FIP_DT_NAME:
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ee0dcf9d3aba..46debe5034af 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1342,7 +1342,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
} else {
refcount = kref_read(&io_req->refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
- "%d:0:%d:%d xid=0x%0x op=0x%02x "
+ "%d:0:%d:%lld xid=0x%0x op=0x%02x "
"lba=%02x%02x%02x%02x cdb_status=%d "
"fcp_resid=0x%x refcount=%d.\n",
qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1426,7 +1426,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
sc_cmd->result = result << 16;
refcount = kref_read(&io_req->refcount);
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
"sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
"allowed=%d retries=%d refcount=%d.\n",
qedf->lport->host->host_no, sc_cmd->device->id,
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d9d7a86b5f8b..8e2a160490e6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2456,8 +2456,8 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
- "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
- qedf->bdq_pbl_dma);
+ "BDQ PBL addr=0x%p dma=%pad\n",
+ qedf->bdq_pbl, &qedf->bdq_pbl_dma);
/*
* Populate BDQ PBL with physical and virtual address of individual
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 955936274241..59417199bf36 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -14,7 +14,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
-int do_not_recover;
+int qedi_do_not_recover;
static struct dentry *qedi_dbg_root;
void
@@ -74,22 +74,22 @@ qedi_dbg_exit(void)
static ssize_t
qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
{
- if (!do_not_recover)
- do_not_recover = 1;
+ if (!qedi_do_not_recover)
+ qedi_do_not_recover = 1;
QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
- do_not_recover);
+ qedi_do_not_recover);
return 0;
}
static ssize_t
qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
{
- if (do_not_recover)
- do_not_recover = 0;
+ if (qedi_do_not_recover)
+ qedi_do_not_recover = 0;
QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
- do_not_recover);
+ qedi_do_not_recover);
return 0;
}
@@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
if (*ppos)
return 0;
- cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+ cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
cnt = min_t(int, count, cnt - *ppos);
*ppos += cnt;
return cnt;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index c9f0ef4e11b3..2bce3efc66a4 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1461,9 +1461,9 @@ static void qedi_tmf_work(struct work_struct *work)
get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
qedi_conn->iscsi_conn_id);
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
- do_not_recover);
+ qedi_do_not_recover);
goto abort_ret;
}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8e488de88ece..63d793f46064 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -12,8 +12,14 @@
#include "qedi_iscsi.h"
+#ifdef CONFIG_DEBUG_FS
+extern int qedi_do_not_recover;
+#else
+#define qedi_do_not_recover (0)
+#endif
+
extern uint qedi_io_tracing;
-extern int do_not_recover;
+
extern struct scsi_host_template qedi_host_template;
extern struct iscsi_transport qedi_iscsi_transport;
extern const struct qed_iscsi_ops *qedi_ops;
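Renaming the global to a qedi_ prefix avoids namespace collisions, and the new #else branch makes the debugfs-only knob compile away: with CONFIG_DEBUG_FS off, qedi_do_not_recover becomes the constant 0 and every test of it is dead code the compiler can eliminate, with no #ifdefs at the call sites. The same pattern in miniature:

	#ifdef CONFIG_DEBUG_FS
	extern int demo_fault_inject;		/* toggled via debugfs */
	#else
	#define demo_fault_inject (0)		/* feature compiled out */
	#endif

	if (demo_fault_inject)
		return -EIO;			/* vanishes when debugfs is off */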
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index b9f79d36142d..4cc474364c50 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -833,7 +833,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
@@ -957,7 +957,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
struct qedi_endpoint *qedi_ep;
int ret = 0;
- if (do_not_recover)
+ if (qedi_do_not_recover)
return 1;
qedi_ep = ep->dd_data;
@@ -1025,7 +1025,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
}
if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Do not recover cid=0x%x\n",
qedi_ep->iscsi_cid);
@@ -1039,7 +1039,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
}
}
- if (do_not_recover)
+ if (qedi_do_not_recover)
goto ep_exit_recover;
switch (qedi_ep->state) {
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5eda21d903e9..8e3d92807cb8 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
*/
qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
- qedi_setup_int(qedi);
+ rc = qedi_setup_int(qedi);
if (rc)
goto stop_iscsi_func;
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 67c0d5aa3212..de952935b5d2 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
select FW_LOADER
+ select BTREE
---help---
This qla2xxx driver supports all QLogic Fibre Channel
PCI and PCIe host adapters.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f610103994af..435ff7fd6384 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
- BUG_ON(atomic_read(&vha->vref_count));
-
qla2x00_free_fcports(vha);
mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
- if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
"Queue Pair delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 21d9fb7fc887..51b4179469d1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
"%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
ql_dbg(level, vha, id,
"----- -----------------------------------------------\n");
- for (cnt = 0; cnt < size; cnt++, buf++) {
- if (cnt % 16 == 0)
- ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
- printk(" %02x", *buf);
- if (cnt % 16 == 15)
- printk("\n");
+ for (cnt = 0; cnt < size; cnt += 16) {
+ ql_dbg(level, vha, id, "%04x: ", cnt);
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+ buf + cnt, min(16U, size - cnt), false);
}
- if (cnt % 16 != 0)
- printk("\n");
}
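The rewritten ql_dump_buffer() leans on the kernel's print_hex_dump() helper instead of a hand-rolled printk(" %02x") loop; KERN_CONT continues the ql_dbg() line that already printed the offset. For a standalone dump, the one-call form usually suffices:

	print_hex_dump(KERN_DEBUG, "buf: ", DUMP_PREFIX_OFFSET, 16, 1,
		       buf, len, false);	/* 16 bytes/row, no ASCII column */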
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index e1fc4e66966a..c6bffe929fe7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_tgt 0x00004000 /* Target mode */
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 625d438e3cce..ae119018dfaa 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include <linux/aer.h>
#include <linux/mutex.h>
+#include <linux/btree.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
struct completion comp;
} abt;
struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[28]; /* fr fw */
- __le16 out_mb[28]; /* to fw */
+ __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
+ struct completion comp;
+ int rc;
} mbx;
struct {
struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
uint32_t handle;
uint16_t flags;
uint16_t type;
- char *name;
+ const char *name;
int iocbs;
struct qla_qpair *qpair;
u32 gen1; /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
enum login_state fw_login_state;
+ unsigned long plogi_nack_done_deadline;
+
u32 login_gen, last_login_gen;
u32 rscn_gen, last_rscn_gen;
u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
uint32_t gold_fw_version;
};
+struct qla_dif_statistics {
+ uint64_t dif_input_bytes;
+ uint64_t dif_output_bytes;
+ uint64_t dif_input_requests;
+ uint64_t dif_output_requests;
+ uint32_t dif_guard_err;
+ uint32_t dif_ref_tag_err;
+ uint32_t dif_app_tag_err;
+};
+
struct qla_statistics {
uint32_t total_isp_aborts;
uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
uint32_t stat_max_pend_cmds;
uint32_t stat_max_qfull_cmds_alloc;
uint32_t stat_max_qfull_cmds_dropped;
+
+ struct qla_dif_statistics qla_dif_stats;
};
struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
unsigned long long transfer_bytes;
};
+struct qla_tc_param {
+ struct scsi_qla_host *vha;
+ uint32_t blk_sz;
+ uint32_t bufflen;
+ struct scatterlist *sg;
+ struct scatterlist *prot_sg;
+ struct crc_context *ctx;
+ uint8_t *ctx_dsd_alloced;
+};
+
/* Multi queue support */
#define MBC_INITIALIZE_MULTIQ 0x1f
#define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
uint8_t tgt_node_name[WWN_SIZE];
struct dentry *dfs_tgt_sess;
+ struct dentry *dfs_tgt_port_database;
+
struct list_head q_full_list;
uint32_t num_pend_cmds;
uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
spinlock_t sess_lock;
int rspq_vector_cpuid;
spinlock_t atio_lock ____cacheline_aligned;
+ struct btree_head32 host_map;
};
#define MAX_QFULL_CMDS_ALLOC 8192
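The new host_map field (together with "select BTREE" in the qla2xxx Kconfig hunk above) brings in the kernel's lib/btree: a btree_head32 maps a 32-bit key, here a port ID, to a pointer. A minimal usage sketch, assuming a void * payload:

	#include <linux/btree.h>

	struct btree_head32 map;

	btree_init32(&map);
	btree_insert32(&map, key, sess, GFP_KERNEL);	/* key: u32, sess: void * */
	sess = btree_lookup32(&map, key);
	btree_remove32(&map, key);
	btree_destroy32(&map);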
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+#define QLA_EARLY_LINKUP(_ha) \
+ ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+ _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
uint32_t fawwpn_enabled:1;
uint32_t exlogins_enabled:1;
uint32_t exchoffld_enabled:1;
- /* 35 bits */
+
+ uint32_t lip_ae:1;
+ uint32_t n2n_ae:1;
+ uint32_t fw_started:1;
+ uint32_t fw_init_done:1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
#define P2P_LOOP 3
uint8_t interrupts_on;
uint32_t isp_abort_cnt;
-
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
struct list_head vp_fcports; /* list of fcports */
struct list_head work_list;
spinlock_t work_lock;
+ struct work_struct iocb_work;
/* Commonly used flags and state information. */
struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
/* Count of active session/fcport */
int fcport_count;
wait_queue_head_t fcport_waitQ;
+ wait_queue_head_t vref_waitq;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
mb(); \
if (__vha->flags.delete_progress) { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
__bail = 1; \
} else { \
__bail = 0; \
} \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
+} while (0) \
#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
atomic_inc(&__qpair->ref_count); \
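The vref_count changes pair every atomic_dec() with a wake_up() on the new vref_waitq, which is what lets vport teardown sleep until all references drain instead of relying on the BUG_ON(atomic_read(&vha->vref_count)) removed from qla24xx_vport_delete() above. The generic shape (the waiter side is assumed, not shown in this section):

	/* releasing a reference */
	atomic_dec(&obj->ref_count);
	wake_up(&obj->ref_waitq);

	/* teardown: block until the last reference is gone */
	wait_event(obj->ref_waitq, atomic_read(&obj->ref_count) == 0);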
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index b48cce696bac..989e17b0758c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct fc_port *sess = NULL;
- struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- seq_printf(s, "%s\n",vha->host_str);
+ seq_printf(s, "%s\n", vha->host_str);
if (tgt) {
- seq_printf(s, "Port ID Port Name Handle\n");
+ seq_puts(s, "Port ID Port Name Handle\n");
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
}
-
static const struct file_operations dfs_tgt_sess_ops = {
.open = qla2x00_dfs_tgt_sess_open,
.read = seq_read,
@@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
};
static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+ scsi_qla_host_t *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ struct gid_list_info *gid_list;
+ dma_addr_t gid_list_dma;
+ fc_port_t fc_port;
+ char *id_iter;
+ int rc, i;
+ uint16_t entries, loop_id;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+ seq_printf(s, "%s\n", vha->host_str);
+ if (tgt) {
+ gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_user, vha, 0x705c,
+ "DMA allocation failed for %u\n",
+ qla2x00_gid_list_size(ha));
+ return 0;
+ }
+
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+ &entries);
+ if (rc != QLA_SUCCESS)
+ goto out_free_id_list;
+
+ id_iter = (char *)gid_list;
+
+ seq_puts(s, "Port Name Port ID Loop ID\n");
+
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid =
+ (struct gid_list_info *)id_iter;
+ loop_id = le16_to_cpu(gid->loop_id);
+ memset(&fc_port, 0, sizeof(fc_port_t));
+
+ fc_port.loop_id = loop_id;
+
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ seq_printf(s, "%8phC %02x%02x%02x %d\n",
+ fc_port.port_name, fc_port.d_id.b.domain,
+ fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+ fc_port.loop_id);
+ id_iter += ha->gid_list_info_size;
+ }
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ }
+
+ return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+
+ return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+ .open = qla2x00_dfs_tgt_port_database_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
seq_printf(s, "num Q full sent = %lld\n",
vha->tgt_counters.num_q_full_sent);
+ /* DIF stats */
+ seq_printf(s, "DIF Inp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_bytes);
+ seq_printf(s, "DIF Outp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_bytes);
+ seq_printf(s, "DIF Inp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_requests);
+ seq_printf(s, "DIF Outp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_requests);
+ seq_printf(s, "DIF Guard err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_guard_err);
+ seq_printf(s, "DIF Ref tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+ seq_printf(s, "DIF App tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_app_tag_err);
return 0;
}
@@ -281,6 +367,14 @@ create_nodes:
goto out;
}
+ ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+ S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+ if (!ha->tgt.dfs_tgt_port_database) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to create debugFS tgt_port_database node.\n");
+ goto out;
+ }
+
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
ha->tgt.dfs_tgt_sess = NULL;
}
+ if (ha->tgt.dfs_tgt_port_database) {
+ debugfs_remove(ha->tgt.dfs_tgt_port_database);
+ ha->tgt.dfs_tgt_port_database = NULL;
+ }
+
if (ha->dfs_fw_resource_cnt) {
debugfs_remove(ha->dfs_fw_resource_cnt);
ha->dfs_fw_resource_cnt = NULL;
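The new tgt_port_database node follows the standard single_open() seq_file recipe: a show() callback, an open() that binds it to the inode's private data, the canned seq_read/seq_lseek/single_release fops, and a matching debugfs_remove() on teardown. Boiled down to a skeleton:

	static int demo_show(struct seq_file *s, void *unused)
	{
		struct demo_ctx *ctx = s->private;

		seq_printf(s, "state=%d\n", ctx->state);
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, inode->i_private);
	}

	static const struct file_operations demo_fops = {
		.open    = demo_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};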
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b3d6441d1d90..5b2451745e9f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
/*
* Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
extern int
qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
- dma_addr_t, uint);
+ dma_addr_t, uint16_t);
extern int qla24xx_abort_command(srb_t *);
extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+ uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+ struct port_database_24xx *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 32fb9007f137..f9d2fe7b1ade 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- uint64_t zero = 0;
struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
- /* Check for logged in state. */
- if (pd->current_login_state != PDS_PRLI_COMPLETE &&
- pd->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd->current_login_state,
- pd->last_login_state, fcport->loop_id);
- rval = QLA_FUNCTION_FAILED;
- goto gpd_error_out;
- }
-
- if (fcport->loop_id == FC_NO_LOOP_ID ||
- (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
- memcmp(fcport->port_name, pd->port_name, 8))) {
- /* We lost the device mid way. */
- rval = QLA_NOT_LOGGED_IN;
- goto gpd_error_out;
- }
-
- /* Names are little-endian. */
- memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
- /* Get port_id of device. */
- fcport->d_id.b.domain = pd->port_id[0];
- fcport->d_id.b.area = pd->port_id[1];
- fcport->d_id.b.al_pa = pd->port_id[2];
- fcport->d_id.b.rsvd_1 = 0;
-
- /* If not target must be initiator or unknown type. */
- if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
-
- /* Passback COS information. */
- fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
- FC_COS_CLASS2 : FC_COS_CLASS3;
-
- if (pd->prli_svc_param_word_3[0] & BIT_7) {
- fcport->flags |= FCF_CONF_COMP_SUPPORTED;
- fcport->conf_compl_supported = 1;
- }
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
gpd_error_out:
memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->login_retry--;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return 0;
+ }
+
/* for pure Target Mode. Login will not be initiated */
if (vha->host->active_mode == MODE_TARGET)
return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
fcport->flags);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return;
+ }
+
if (fcport->flags & FCF_ASYNC_SENT) {
fcport->login_retry++;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
complete(&abt->u.abt.comp);
}
-static int
+int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ next_check:
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
+ ha->flags.fw_started = 1;
}
return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
uint8_t domain;
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ port_id_t id;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
/* Save Host port and loop ID. */
/* byte order - Big Endian */
- vha->d_id.b.domain = domain;
- vha->d_id.b.area = area;
- vha->d_id.b.al_pa = al_pa;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ id.b.domain = domain;
+ id.b.area = area;
+ id.b.al_pa = al_pa;
+ id.b.rsvd_1 = 0;
+ qlt_update_host_map(vha, id);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_READY);
ql_dbg(ql_dbg_disc, vha, 0x2069,
"LOOP READY.\n");
+ ha->flags.fw_init_done = 1;
/*
* Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
}
}
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
ha->chip_reset++;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
return;
if (!ha->fw_major_version)
return;
+ if (!ha->flags.fw_started)
+ return;
ret = qla2x00_stop_firmware(vha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
"Attempting retry of stop-firmware command.\n");
ret = qla2x00_stop_firmware(vha);
}
+
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
}
int
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 535079280288..ea027f6a7fd4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
struct scatterlist *sg_prot;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
-
uint32_t prot_int; /* protection interval */
uint32_t partial;
struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ alloc_and_fill:
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
@@ -1005,7 +1004,7 @@ alloc_and_fill:
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
- uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 3c66ea29de27..3203367a4f42 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -708,6 +708,8 @@ skip_rio:
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
ha->isp_ops->fw_dump(vha, 1);
+ ha->flags.fw_init_done = 0;
+ ha->flags.fw_started = 0;
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ skip_rio:
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
+ ha->flags.lip_ae = 1;
+ ha->flags.n2n_ae = 0;
+
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -797,6 +802,10 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ skip_rio:
/* case MBA_DCBX_COMPLETE: */
case MBA_POINT_TO_POINT: /* Point-to-Point */
+ ha->flags.lip_ae = 0;
+ ha->flags.n2n_ae = 1;
+
if (IS_QLA2100(ha))
break;
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
ql_log(ql_log_warn, fcport->vha, 0x5034,
- "Async-%s error entry - hdl=%x"
+ "Async-%s error entry - %8phC hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
- type, sp->handle, fcport->d_id.b.domain,
+ type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
logio->entry_status);
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
- "Async-%s complete - hdl=%x portid=%02x%02x%02x "
- "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, fcport->port_name, sp->handle,
+ fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
+ case LSC_SCODE_CMD_FAILED:
+ if (iop[1] == 0x0606) {
+ /*
+ * PLOGI/PRLI completed: we must have received a PLOGI/PRLI
+ * and the target side ACKed it.
+ */
+ data[0] = MBS_COMMAND_COMPLETE;
+ goto logio_done;
+ }
+ data[0] = MBS_COMMAND_ERROR;
+ break;
case LSC_SCODE_NOXCB:
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
- "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, fcport->port_name,
+ sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
sp->done(sp, 0);
}
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 35079f417417..a113ab3592a7 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,28 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+static struct mb_cmd_name {
+ uint16_t cmd;
+ const char *str;
+} mb_str[] = {
+ {MBC_GET_PORT_DATABASE, "GPDB"},
+ {MBC_GET_ID_LIST, "GIDList"},
+ {MBC_GET_LINK_PRIV_STATS, "Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+ int i;
+ struct mb_cmd_name *e;
+
+ for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+ e = mb_str + i;
+ if (cmd == e->cmd)
+ return e->str;
+ }
+ return "unknown";
+}
+
static struct rom_cmd {
uint16_t cmd;
} rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
- dma_addr_t stats_dma, uint options)
+ dma_addr_t stats_dma, uint16_t options)
{
int rval;
mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
- mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
- mcp->mb[2] = MSW(stats_dma);
- mcp->mb[3] = LSW(stats_dma);
- mcp->mb[6] = MSW(MSD(stats_dma));
- mcp->mb[7] = LSW(MSD(stats_dma));
- mcp->mb[8] = sizeof(struct link_statistics) / 4;
- mcp->mb[9] = vha->vp_idx;
- mcp->mb[10] = options;
- mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
- mcp->in_mb = MBX_2|MBX_1|MBX_0;
- mcp->tov = MBX_TOV_SECONDS;
- mcp->flags = IOCTL_CMD;
- rval = qla2x00_mailbox_command(vha, mcp);
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+ mc.mb[2] = MSW(stats_dma);
+ mc.mb[3] = LSW(stats_dma);
+ mc.mb[6] = MSW(MSD(stats_dma));
+ mc.mb[7] = LSW(MSD(stats_dma));
+ mc.mb[8] = sizeof(struct link_statistics) / 4;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16(options);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp = NULL;
unsigned long flags;
int found;
+ port_id_t id;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (rptid_entry->entry_status != 0)
return;
+ id.b.domain = rptid_entry->port_id[2];
+ id.b.area = rptid_entry->port_id[1];
+ id.b.al_pa = rptid_entry->port_id[0];
+ id.b.rsvd_1 = 0;
+
if (rptid_entry->format == 0) {
/* loop */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+ ql_dbg(ql_dbg_async, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
"VPs acquired %d.\n", rptid_entry->vp_setup,
rptid_entry->vp_acquired);
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+ ql_dbg(ql_dbg_async, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
/* fabric */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+ ql_dbg(ql_dbg_async, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
"port id %02x%02x%02x.\n", rptid_entry->vp_idx,
rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
WWN_SIZE);
}
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
}
fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (!found)
return;
- vp->d_id.b.domain = rptid_entry->port_id[2];
- vp->d_id.b.area = rptid_entry->port_id[1];
- vp->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vp, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vp, id);
/*
* Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ sp->u.iocb_cmd.u.mbx.rc = res;
+
+ complete(&sp->u.iocb_cmd.u.mbx.comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This routine uses the IOCB interface to send MB commands. It
+ * allows non-critical (non chip-setup) commands to go out in
+ * parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ struct srb_iocb *c;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_MB_IOCB;
+ sp->name = mb_to_str(mcp->mb[0]);
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+ c = &sp->u.iocb_cmd;
+ c->timeout = qla2x00_async_iocb_timeout;
+ init_completion(&c->u.mbx.comp);
+
+ sp->done = qla2x00_async_mb_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
+ wait_for_completion(&c->u.mbx.comp);
+ memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+ rval = c->u.mbx.rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ sp->free(sp);
+ break;
+ default:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ sp->free(sp);
+ break;
+ }
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
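/*
 * A minimal caller sketch (illustrative, not part of this patch): any
 * process-context path can drive a mailbox command through the IOCB
 * interface by zero-filling an mbx_cmd_t, loading the out mailboxes,
 * and reading the in mailboxes back once qla24xx_send_mb_cmd()
 * returns. The command chosen below is a placeholder.
 */
static int example_mb_via_iocb(struct scsi_qla_host *vha, uint16_t *state)
{
	mbx_cmd_t mc;
	int rval;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_FIRMWARE_STATE;	/* placeholder command */

	rval = qla24xx_send_mb_cmd(vha, &mc);	/* sleeps on completion */
	if (rval == QLA_SUCCESS)
		*state = mc.mb[1];	/* in_mb copied back on completion */
	return rval;
}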
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ dma_addr_t pd_dma;
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ if (pd == NULL) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate port database structure.\n");
+ goto done_free_sp;
+ }
+ memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_PORT_DATABASE;
+ mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[2] = MSW(pd_dma);
+ mc.mb[3] = LSW(pd_dma);
+ mc.mb[6] = MSW(MSD(pd_dma));
+ mc.mb[7] = LSW(MSD(pd_dma));
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %8phC fail\n", __func__, fcport->port_name);
+ goto done_free_sp;
+ }
+
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+ __func__, fcport->port_name);
+
+done_free_sp:
+ if (pd)
+ dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+ return rval;
+}
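/*
 * Caller sketch (illustrative, not from this patch): the _wait helpers
 * sleep on a completion, so they need process context; the NOTE above
 * presumably exists because the DPC thread also drives error recovery
 * and must not block here. A workqueue item is a safe caller; the
 * gpdb_work member below is hypothetical.
 */
static void example_gpdb_work(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, fc_port_t, gpdb_work);

	if (qla24xx_gpdb_wait(fcport->vha, fcport, 0) != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
		    "%s: %8phC GPDB failed\n", __func__, fcport->port_name);
}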
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct port_database_24xx *pd)
+{
+ int rval = QLA_SUCCESS;
+ uint64_t zero = 0;
+
+ /* Check for logged in state. */
+ if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+ pd->last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd->current_login_state,
+ pd->last_login_state, fcport->loop_id);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device mid way. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+ memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd->port_id[0];
+ fcport->d_id.b.area = pd->port_id[1];
+ fcport->d_id.b.al_pa = pd->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not target must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd->prli_svc_param_word_3[0] & BIT_7) {
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+ fcport->conf_compl_supported = 1;
+ }
+
+gpd_error_out:
+ return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: Do not call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+ void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_ID_LIST;
+ mc.mb[2] = MSW(id_list_dma);
+ mc.mb[3] = LSW(id_list_dma);
+ mc.mb[6] = MSW(MSD(id_list_dma));
+ mc.mb[7] = LSW(MSD(id_list_dma));
+ mc.mb[8] = 0;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
+ *entries = mc.mb[1];
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
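/*
 * Usage sketch (illustrative; mirrors the qlt_get_port_database()
 * conversion later in this patch): the ID list buffer handed to the
 * firmware must be DMA-able, so it is allocated coherently around the
 * wait call. The wrapper name is hypothetical.
 */
static int example_count_logged_in(struct scsi_qla_host *vha,
	uint16_t *entries)
{
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list;
	dma_addr_t gid_list_dma;
	int rc;

	gid_list = dma_alloc_coherent(&ha->pdev->dev,
	    qla2x00_gid_list_size(ha), &gid_list_dma, GFP_KERNEL);
	if (!gid_list)
		return QLA_MEMORY_ALLOC_FAILED;

	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, entries);

	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return rc;
}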
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c6d6f0d912ff..09a490c98763 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- spin_lock_irqsave(&ha->vport_slock, flags);
- while (atomic_read(&vha->vref_count)) {
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- msleep(500);
+ wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+ 10*HZ);
- spin_lock_irqsave(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ if (atomic_read(&vha->vref_count)) {
+ ql_dbg(ql_dbg_vport, vha, 0xfffa,
+ "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+ vha->vref_count = (atomic_t)ATOMIC_INIT(0);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
i++;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1fed235a1b4a..41d5b09f7326 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
return atomic_read(&vha->loop_state) == LOOP_READY;
}
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+ struct scsi_qla_host *vha = container_of(work,
+ struct scsi_qla_host, iocb_work);
+ int cnt = 0;
+
+ while (!list_empty(&vha->work_list)) {
+ qla2x00_do_work(vha);
+ cnt++;
+ if (cnt > 10)
+ break;
+ }
+}
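/*
 * Note on the cap above (inferred): the work item drains at most ten
 * batches of the work list per invocation, so an event flood cannot
 * monopolize the shared workqueue; anything left over is picked up the
 * next time the work is scheduled.
 */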
+
/*
* PCI driver interface
*/
@@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
qla2xxx_wake_dpc(base_vha);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_sysfs_attr(base_vha, true);
fc_remove_host(base_vha->host);
+ qlt_remove_target_resources(ha);
scsi_remove_host(base_vha->host);
@@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
+ init_waitqueue_head(&vha->vref_waitq);
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
@@ -4319,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
spin_unlock_irqrestore(&vha->work_lock, flags);
- qla2xxx_wake_dpc(vha);
+
+ if (QLA_EARLY_LINKUP(vha->hw))
+ schedule_work(&vha->iocb_work);
+ else
+ qla2xxx_wake_dpc(vha);
return QLA_SUCCESS;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 45f5077684f0..0e03ca2ab3e5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+ struct abts_recv_from_24xx *);
+
/*
* Global Variables
*/
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
+static const char *prot_op_str(u32 prot_op)
+{
+ switch (prot_op) {
+ case TARGET_PROT_NORMAL: return "NORMAL";
+ case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
+ case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
+ case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
+ case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
+ case TARGET_PROT_DIN_PASS: return "DIN_PASS";
+ case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
+ default: return "UNKNOWN";
+ }
+}
+
/* This API intentionally takes dest as a parameter, rather than returning
* int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
uint8_t *d_id)
{
- struct qla_hw_data *ha = vha->hw;
- uint8_t vp_idx;
-
- if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
- return NULL;
+ struct scsi_qla_host *host;
+ uint32_t key = 0;
- if (vha->d_id.b.al_pa == d_id[2])
+ if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+ (vha->d_id.b.al_pa == d_id[2]))
return vha;
- BUG_ON(ha->tgt.tgt_vp_map == NULL);
- vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
- if (likely(test_bit(vp_idx, ha->vp_idx_map)))
- return ha->tgt.tgt_vp_map[vp_idx].vha;
+ key = (uint32_t)d_id[0] << 16;
+ key |= (uint32_t)d_id[1] << 8;
+ key |= (uint32_t)d_id[2];
- return NULL;
+ host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!host)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Unable to find host %06x\n", key);
+
+ return host;
}
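/*
 * Hedged note: the 24-bit btree key above has to match the key under
 * which qlt_update_vp_map() inserts the host (vha->d_id.b24), i.e.
 * domain in the top byte, then area, then al_pa. A quick consistency
 * check, purely illustrative:
 */
static bool example_key_matches_b24(struct scsi_qla_host *vha,
	const uint8_t *d_id)
{
	uint32_t key = ((uint32_t)d_id[0] << 16) |
		       ((uint32_t)d_id[1] << 8) |
		       (uint32_t)d_id[2];

	return key == vha->d_id.b24;	/* same packing on both sides */
}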
static inline
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
(struct abts_recv_from_24xx *)atio;
struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
entry->vp_index);
+ unsigned long flags;
+
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xffff,
"qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, (response_t *)atio);
+ if (!ha_locked)
+ spin_lock_irqsave(&host->hw->hardware_lock, flags);
+ qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
break;
-
}
/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_gen++;
sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
sp->fcport->logout_on_delete = 1;
+ sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
break;
case SRB_NACK_PRLI:
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
break;
case SRB_NACK_PRLI:
fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->deleted = 0;
c = "PRLI";
break;
case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
}
/* Get list of logged in devices */
- rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
"qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
request_t *pkt;
struct nack_to_isp *nack;
+ if (!ha->flags.fw_started)
+ return;
+
ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
/* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
}
EXPORT_SYMBOL(qlt_free_mcmd);
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire
+ */
+void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+ struct atio_from_isp *atio = &cmd->atio;
+ struct ctio7_to_24xx *ctio;
+ uint16_t temp;
+
+ ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+ "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+ "sense_key=%02x, asc=%02x, ascq=%02x",
+ vha, atio, scsi_status, sense_key, asc, ascq);
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!ctio) {
+ ql_dbg(ql_dbg_async, vha, 0x3067,
+ "qla2x00t(%ld): %s failed: unable to allocate request packet",
+ vha->host_no, __func__);
+ goto out;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE;
+ ctio->nport_handle = cmd->sess->loop_id;
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.ox_id = cpu_to_le16(temp);
+ ctio->u.status1.scsi_status =
+ cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+ ctio->u.status1.response_len = cpu_to_le16(18);
+ ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+ if (ctio->u.status1.residual != 0)
+ ctio->u.status1.scsi_status |=
+ cpu_to_le16(SS_RESIDUAL_UNDER);
+
+ /* Response code and sense key */
+ put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+ (&ctio->u.status1.sense_data)[0]);
+ /* Additional sense length */
+ put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+ /* ASC and ASCQ */
+ put_unaligned_le32(((asc << 24) | (ascq << 16)),
+ (&ctio->u.status1.sense_data)[3]);
+
+ /* Memory Barrier */
+ wmb();
+
+ qla2x00_start_iocbs(vha, vha->req);
+out:
+ return;
+}
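/*
 * Layout note (inferred from the cpu_to_be32() sense-buffer copy
 * elsewhere in this file): the firmware consumes sense data as
 * byte-swapped 32-bit groups, so the three little-endian stores above
 * land the standard fixed-format bytes where the chip expects them:
 * 0x70 (response code) and the sense key in group 0, the additional
 * sense length 0x0a in group 1, and ASC/ASCQ in group 3.
 */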
+
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h-1] = prm->cmd;
+ ha->tgt.cmds[h - 1] = prm->cmd;
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
return cmd->bufflen > 0;
}
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd;
+ struct scsi_qla_host *vha;
+
+ /* asc 0x10=dif error */
+ if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+ cmd = prm->cmd;
+ vha = cmd->vha;
+ /* ASCQ */
+ switch (prm->sense_buffer[13]) {
+ case 1:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 2:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 3:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ }
+ ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+ }
+}
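/*
 * Reference for the ASCQ decode above (SPC, ASC 0x10): 0x1 = LOGICAL
 * BLOCK GUARD CHECK FAILED, 0x2 = LOGICAL BLOCK APPLICATION TAG CHECK
 * FAILED, 0x3 = LOGICAL BLOCK REFERENCE TAG CHECK FAILED; anything
 * else is reported as a generic DIF error.
 */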
+
/*
* Called without ha->hardware_lock held
*/
@@ -2512,18 +2649,9 @@ skip_explict_conf:
for (i = 0; i < prm->sense_buffer_len/4; i++)
((uint32_t *)ctio->u.status1.sense_data)[i] =
cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-#if 0
- if (unlikely((prm->sense_buffer_len % 4) != 0)) {
- static int q;
- if (q < 10) {
- ql_dbg(ql_dbg_tgt, vha, 0xe04f,
- "qla_target(%d): %d bytes of sense "
- "lost", prm->tgt->ha->vp_idx,
- prm->sense_buffer_len % 4);
- q++;
- }
- }
-#endif
+
+ qlt_print_dif_err(prm);
+
} else {
ctio->u.status1.flags &=
~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ skip_explict_conf:
/* Sense with len > 24, is it possible ??? */
}
-
-
-/* diff */
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
- /*
- * Uncomment when corresponding SCSI changes are done.
- *
- if (!sp->cmd->prot_chk)
- return 0;
- *
- */
switch (se_cmd->prot_op) {
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
return 0;
}
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
+{
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_STRIP:
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ return 1;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
/*
- * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
- *
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
*/
-static inline void
-qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+ uint16_t *pfw_prot_opts)
{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+ scsi_qla_host_t *vha = cmd->tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t t32 = 0;
- /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+ /*
+ * Wait until Mode Sense/Select cmd, modepage Ah, subpage 2
* have been implemented by TCM, before AppTag is available.
* Look for modesense_handlers[]
*/
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
ctx->app_tag_mask[0] = 0x0;
ctx->app_tag_mask[1] = 0x0;
+ if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+ (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+ *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+ else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+ *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+ }
+
+ t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
switch (se_cmd->prot_type) {
case TARGET_DIF_TYPE0_PROT:
/*
- * No check for ql2xenablehba_err_chk, as it would be an
- * I/O error if hba tag generation is not done.
+ * No check for ql2xenablehba_err_chk, as it
+ * would be an I/O error if hba tag generation
+ * is not done.
*/
ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
/* enable ALL bytes of the ref tag */
ctx->ref_tag_mask[0] = 0xff;
ctx->ref_tag_mask[1] = 0xff;
ctx->ref_tag_mask[2] = 0xff;
ctx->ref_tag_mask[3] = 0xff;
break;
- /*
- * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
- * 16 bit app tag.
- */
case TARGET_DIF_TYPE1_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
- /*
- * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
- * match LBA in CDB + N
- */
+ /*
+ * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+ * REF tag, and 16 bit app tag.
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE2_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
-
- /* For Type 3 protection: 16 bit GUARD only */
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+ * tag has to match LBA in CDB + N
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE3_PROT:
- ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
- ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
- break;
+ /* For TYPE 3 protection: 16 bit GUARD only */
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+ ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+ break;
}
}
-
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct se_cmd *se_cmd = &cmd->se_cmd;
uint32_t h;
struct atio_from_isp *atio = &prm->cmd->atio;
+ struct qla_tc_param tc;
uint16_t t16;
ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
transfer_length = data_bytes;
- data_bytes += dif_bytes;
+ if (cmd->prot_sg_cnt)
+ data_bytes += dif_bytes;
break;
-
case TARGET_PROT_DIN_STRIP:
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_PASS:
case TARGET_PROT_DOUT_PASS:
transfer_length = data_bytes + dif_bytes;
break;
-
default:
BUG();
break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
break;
}
-
/* ---- PKT ---- */
/* Update entry type to indicate Command Type CRC_2 IOCB */
pkt->entry_type = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
} else
ha->tgt.cmds[h-1] = prm->cmd;
-
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
- pkt->nport_handle = prm->cmd->loop_id;
+ pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
-
pkt->dseg_count = prm->tot_dsds;
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
-
/* ----- CRC context -------- */
/* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Set handle */
crc_ctx_pkt->handle = pkt->handle;
- qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+ qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
-
if (!bundling) {
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
} else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
+ memset((uint8_t *)&tc, 0, sizeof(tc));
+ tc.vha = vha;
+ tc.blk_sz = cmd->blk_sz;
+ tc.bufflen = cmd->bufflen;
+ tc.sg = cmd->sg;
+ tc.prot_sg = cmd->prot_sg;
+ tc.ctx = crc_ctx_pkt;
+ tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
/* Walks data segments */
pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
if (!bundling && prm->prot_seg_cnt) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
- prm->tot_dsds, cmd))
+ prm->tot_dsds, &tc))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
- (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+ (prm->tot_dsds - prm->prot_seg_cnt), &tc))
goto crc_queuing_error;
if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
- prm->prot_seg_cnt, cmd))
+ prm->prot_seg_cnt, &tc))
goto crc_queuing_error;
}
return QLA_SUCCESS;
crc_queuing_error:
/* Cleanup will be performed by the caller */
+ vha->hw->tgt.cmds[h - 1] = NULL;
return QLA_FUNCTION_FAILED;
}
-
/*
* Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
* QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
else
vha->tgt_counters.core_qla_que_buf++;
- if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
+ if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
+ if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
/*
- * Checks the guard or meta-data for the type of error
- * detected by the HBA.
* It is assumed that either the hardware_lock or the qpair lock is held.
*/
-static inline int
+static void
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
- struct ctio_crc_from_fw *sts)
+ struct ctio_crc_from_fw *sts)
{
uint8_t *ap = &sts->actual_dif[0];
uint8_t *ep = &sts->expected_dif[0];
- uint32_t e_ref_tag, a_ref_tag;
- uint16_t e_app_tag, a_app_tag;
- uint16_t e_guard, a_guard;
uint64_t lba = cmd->se_cmd.t_task_lba;
+ uint8_t scsi_status, sense_key, asc, ascq;
+ unsigned long flags;
- a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
- a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
- a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-
- e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
- e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
- e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
-
- ql_dbg(ql_dbg_tgt, vha, 0xe075,
- "iocb(s) %p Returned STATUS.\n", sts);
-
- ql_dbg(ql_dbg_tgt, vha, 0xf075,
- "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
-
- /*
- * Ignore sector if:
- * For type 3: ref & app tag is all 'f's
- * For type 0,1,2: app tag is all 'f's
- */
- if ((a_app_tag == 0xffff) &&
- ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
- (a_ref_tag == 0xffffffff))) {
- uint32_t blocks_done;
-
- /* 2TB boundary case covered automatically with this */
- blocks_done = e_ref_tag - (uint32_t)lba + 1;
- cmd->se_cmd.bad_sector = e_ref_tag;
- cmd->se_cmd.pi_err = 0;
- ql_dbg(ql_dbg_tgt, vha, 0xf074,
- "need to return scsi good\n");
-
- /* Update protection tag */
- if (cmd->prot_sg_cnt) {
- uint32_t i, k = 0, num_ent;
- struct scatterlist *sg, *sgl;
-
-
- sgl = cmd->prot_sg;
-
- /* Patch the corresponding protection tags */
- for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
- num_ent = sg_dma_len(sg) / 8;
- if (k + num_ent < blocks_done) {
- k += num_ent;
- continue;
- }
- k = blocks_done;
- break;
- }
+ cmd->trc_flags |= TRC_DIF_ERR;
- if (k != blocks_done) {
- ql_log(ql_log_warn, vha, 0xf076,
- "unexpected tag values tag:lba=%u:%llu)\n",
- e_ref_tag, (unsigned long long)lba);
- goto out;
- }
+ cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
+ cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+ cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-#if 0
- struct sd_dif_tuple *spt;
- /* TODO:
- * This section came from initiator. Is it valid here?
- * should ulp be override with actual val???
- */
- spt = page_address(sg_page(sg)) + sg->offset;
- spt += j;
+ cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
+ cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+ cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
- spt->app_tag = 0xffff;
- if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
- spt->ref_tag = 0xffffffff;
-#endif
- }
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+ "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
- return 0;
- }
+ scsi_status = sense_key = asc = ascq = 0;
- /* check guard */
- if (e_guard != a_guard) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe076,
- "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ /* check appl tag */
+ if (cmd->e_app_tag != cmd->a_app_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ cmd->dif_err_code = DIF_ERR_APP;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x2;
}
/* check ref tag */
- if (e_ref_tag != a_ref_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = e_ref_tag;
-
- ql_log(ql_log_warn, vha, 0xe077,
- "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
+ if (cmd->e_ref_tag != cmd->a_ref_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard[%x|%x] cmd=%p ox_id[%04x] ",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ cmd->dif_err_code = DIF_ERR_REF;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x3;
goto out;
}
- /* check appl tag */
- if (e_app_tag != a_app_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe078,
- "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ /* check guard */
+ if (cmd->e_guard != cmd->a_guard) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ cmd->dif_err_code = DIF_ERR_GRD;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x1;
}
out:
- return 1;
-}
+ switch (cmd->state) {
+ case QLA_TGT_STATE_NEED_DATA:
+ /* handle_data will load DIF error code */
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
+ break;
+ default:
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+ /* Assume the SCSI status gets out on the wire;
+ * do not wait for completion.
+ */
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+}
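/*
 * Exit-path summary (inferred from the switch above): a command still
 * waiting for write data when the DIF check failed is bounced to the
 * backend via handle_data(), which reads cmd->dif_err_code; any other
 * state gets an immediate CHECK CONDITION through qlt_send_resp_ctio()
 * and is freed without waiting for that CTIO to complete.
 */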
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
"Sending TERM ELS CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe080,
"qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
{
int term = 0;
+ if (cmd->se_cmd.prot_op)
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x] op %#x/%s",
+ cmd->lba, cmd->lba,
+ cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr,
+ cmd->se_cmd.prot_op,
+ prot_op_str(cmd->se_cmd.prot_op));
+
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
- "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+ "qla_target(%d): CTIO with DIF_ERROR status %x "
+ "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+ "expect_dif[0x%llx]\n",
vha->vp_idx, status, cmd->state, se_cmd,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- if (qlt_handle_dif_error(vha, cmd, ctio)) {
- if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- /* scsi Write/xfer rdy complete */
- goto skip_term;
- } else {
- /* scsi read/xmit respond complete
- * call handle dif to send scsi status
- * rather than terminate exchange.
- */
- cmd->state = QLA_TGT_STATE_PROCESSED;
- ha->tgt.tgt_ops->handle_dif_err(cmd);
- return;
- }
- } else {
- /* Need to generate a SCSI good completion.
- * because FW did not send scsi status.
- */
- status = 0;
- goto skip_term;
- }
- break;
+ qlt_handle_dif_error(vha, cmd, ctio);
+ return;
}
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
}
}
-skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (sess != NULL) {
- if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
+ if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+ sess->fw_login_state != DSC_LS_PLOGI_COMP) {
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Make session global (not used in fabric mode) */
if (ha->current_topology != ISP_CFG_F) {
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
+ qla24xx_post_nack_work(vha, sess, iocb,
+ SRB_NACK_PRLI);
+ res = 0;
+ } else {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
} else {
if (sess) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post nack\n",
- __func__, __LINE__, sess->port_name);
-
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
SRB_NACK_PRLI);
res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
break;
-
case ELS_TPRLO:
if (le16_to_cpu(iocb->u.isp24.flags) &
NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+ struct atio_from_isp *atio, bool ha_locked)
{
struct qla_hw_data *ha = vha->hw;
uint16_t status;
+ unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
return 0;
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
status = temp_sam_status;
qlt_send_busy(vha, atio, status);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return 1;
}
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
unsigned long flags;
if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_io, vha, 0x3064,
+ ql_dbg(ql_dbg_tgt, vha, 0x3064,
"ATIO pkt, but no tgt (ha %p)", ha);
return;
}
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
if (rc != 0) {
tgt->atio_irq_cmd_count--;
return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
if (rc != 0) {
tgt->irq_cmd_count--;
return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
- rc = qla2x00_get_port_database(vha, fcport, 0);
+ rc = qla24xx_gpdb_wait(vha, fcport, 0);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
"qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
}
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term;
-
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
if (rc != 0)
goto out_term;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
out_term2:
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
out_term:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (tgt->tgt_stop)
- goto out_term;
+ goto out_term2;
s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (!sess)
- goto out_term;
+ goto out_term2;
} else {
if (sess->deleted) {
sess = NULL;
- goto out_term;
+ goto out_term2;
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
"%s: kref_get fail %8phC\n",
__func__, sess->port_name);
sess = NULL;
- goto out_term;
+ goto out_term2;
}
}
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- if (rc != 0)
- goto out_term;
-
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ if (rc != 0)
+ goto out_term;
return;
+out_term2:
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
- if (base_vha->fc_vport)
- return 0;
-
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
+ if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+ ha->tgt.tgt_ops->add_target(base_vha);
+
return 0;
}
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
return 0;
}
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+ struct scsi_qla_host *node;
+ u32 key = 0;
+
+ btree_for_each_safe32(&ha->tgt.host_map, key, node)
+ btree_remove32(&ha->tgt.host_map, key);
+
+ btree_destroy32(&ha->tgt.host_map);
+}
+
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
unsigned char *b)
{
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
struct atio_from_isp *pkt;
int cnt, i;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_unknown_atio_work_fn);
qlt_clear_mode(base_vha);
+
+ rc = btree_init32(&ha->tgt.host_map);
+ if (rc)
+ ql_log(ql_log_info, base_vha, 0xffff,
+ "Unable to initialize ha->host_map btree\n");
+
+ qlt_update_vp_map(base_vha, SET_VP_IDX);
}
irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(op);
}
void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
+ void *slot;
+ u32 key;
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
+ key = vha->d_id.b24;
+
switch (cmd) {
case SET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
break;
case SET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!slot) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Save vha in host_map %p %06x\n", vha, key);
+ rc = btree_insert32(&vha->hw->tgt.host_map,
+ key, vha, GFP_ATOMIC);
+ if (rc)
+ ql_log(ql_log_info, vha, 0xffff,
+ "Unable to insert s_id into host_map: %06x\n",
+ key);
+ return;
+ }
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "replace existing vha in host_map %p %06x\n", vha, key);
+ btree_update32(&vha->hw->tgt.host_map, key, vha);
break;
case RESET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "clear vha in host_map %p %06x\n", vha, key);
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (slot)
+ btree_remove32(&vha->hw->tgt.host_map, key);
+ vha->d_id.b24 = 0;
break;
}
}
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->d_id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ } else if (vha->d_id.b24 != id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, RESET_AL_PA);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ }
+}
+
static int __init qlt_parse_ini_mode(void)
{
if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
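
The host_map added above keys a lib/btree 32-bit tree by the 24-bit FC port ID (d_id.b24) and stores the owning vha pointer. A minimal sketch of that API, with hypothetical helper names and locking omitted:

#include <linux/btree.h>

static struct btree_head32 host_map;

/* Map a 24-bit port ID to its scsi_qla_host, as qlt_update_vp_map() does. */
static int host_map_example(struct scsi_qla_host *vha, u32 key)
{
	int rc = btree_init32(&host_map);

	if (rc)
		return rc;

	if (!btree_lookup32(&host_map, key))		/* new entry */
		rc = btree_insert32(&host_map, key, vha, GFP_ATOMIC);
	else						/* relogin: replace */
		rc = btree_update32(&host_map, key, vha);

	btree_remove32(&host_map, key);
	btree_destroy32(&host_map);
	return rc;
}
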
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index a7f90dcaae37..d64420251194 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
}
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+ int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+ return (be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
+
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
- void (*handle_dif_err)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
void (*clear_nacl_from_fcport_map)(struct fc_port *);
void (*put_sess)(struct fc_port *);
void (*shutdown_sess)(struct fc_port *);
+ int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+ int (*chk_dif_tags)(uint32_t tag);
+ void (*add_target)(struct scsi_qla_host *);
};
int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_ABORT_ALL 0xFFFE
#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
#define QLA_TGT_NEXUS_LOSS 0xFFFC
-#define QLA_TGT_ABTS 0xFFFB
-#define QLA_TGT_2G_ABORT_TASK 0xFFFA
+#define QLA_TGT_ABTS 0xFFFB
+#define QLA_TGT_2G_ABORT_TASK 0xFFFA
/* Notify Acknowledge flags */
#define NOTIFY_ACK_RES_COUNT BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
TRC_CMD_FREE = BIT_17,
TRC_DATA_IN = BIT_18,
TRC_ABORT = BIT_19,
+ TRC_DIF_ERR = BIT_20,
};
struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
unsigned int sg_mapped:1;
unsigned int free_sg:1;
unsigned int write_data_transferred:1;
- unsigned int ctx_dsd_alloced:1;
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
struct list_head cmd_list;
struct atio_from_isp atio;
- /* t10dif */
+
+ uint8_t ctx_dsd_alloced;
+
+ /* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+ int8_t dif_err_code;
struct scatterlist *prot_sg;
uint32_t prot_sg_cnt;
- uint32_t blk_sz;
+ uint32_t blk_sz, num_blks;
+ uint8_t scsi_status, sense_key, asc, ascq;
+
struct crc_context *ctx;
+ uint8_t *cdb;
+ uint64_t lba;
+ uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
+ uint32_t a_ref_tag, e_ref_tag;
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+ uint8_t, uint8_t, uint8_t);
+
#endif /* __QLA_TARGET_H */
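
get_datalen_for_atio() above reads the FCP_DL field, a big-endian 32-bit transfer length placed immediately after add_cdb_len four-byte groups of additional CDB. A sketch of the same read with the dedicated unaligned helper (hypothetical wrapper name):

#include <asm/unaligned.h>

/* Equivalent of be32_to_cpu(get_unaligned(...)) in a single call. */
static u32 read_fcp_dl(const u8 *add_cdb, u8 add_cdb_len)
{
	return get_unaligned_be32(add_cdb + add_cdb_len * 4);
}
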
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3cb1964b7786..45bc84e8e3bf 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.38-k"
+#define QLA2XXX_VERSION "9.00.00.00-k"
-#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 7
+#define QLA_DRIVER_MAJOR_VER 9
+#define QLA_DRIVER_MINOR_VER 0
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8e8ab0fa9672..7443e4efa3ae 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
+ switch (cmd->dif_err_code) {
+ case DIF_ERR_GRD:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case DIF_ERR_REF:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_APP:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_NONE:
+ default:
+ break;
+ }
+
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
{
- struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
- /* take an extra kref to prevent cmd free too early.
- * need to wait for SCSI status/check condition to
- * finish responding generate by transport_generic_request_failure.
- */
- kref_get(&cmd->se_cmd.cmd_kref);
- transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+ return 0;
}
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+ uint16_t *pfw_prot_opts)
{
- INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
- queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+ *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+ *pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+ return 0;
}
/*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
- .handle_dif_err = tcm_qla2xxx_handle_dif_err,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
.put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
+ .get_dif_tags = tcm_qla2xxx_dif_tags,
+ .chk_dif_tags = tcm_qla2xxx_chk_dif_tags,
};
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
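
The dif_err_code switch added to tcm_qla2xxx_handle_data_work() maps the four DIF_ERR_* values onto TCM protection-error sense codes. Since the codes are small consecutive integers, the same mapping could be expressed as a lookup table; a sketch under that assumption:

/* Table-driven alternative to the switch above; indexed by DIF_ERR_*. */
static const sense_reason_t dif_err_to_pi_err[] = {
	[DIF_ERR_NONE] = 0,
	[DIF_ERR_GRD]  = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED,
	[DIF_ERR_REF]  = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED,
	[DIF_ERR_APP]  = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED,
};

/* usage: cmd->se_cmd.pi_err = dif_err_to_pi_err[cmd->dif_err_code]; */
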
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ba2286652ff6..19125d72f322 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2932,6 +2932,8 @@ EXPORT_SYMBOL(scsi_target_resume);
/**
* scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
* @sdev: device to block
+ * @wait: Whether or not to wait until ongoing .queuecommand() /
+ * .queue_rq() calls have finished.
*
 * Block requests made by SCSI LLDs to temporarily stop all
 * SCSI commands on the specified device. May sleep.
@@ -2949,7 +2951,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
int
-scsi_internal_device_block(struct scsi_device *sdev)
+scsi_internal_device_block(struct scsi_device *sdev, bool wait)
{
struct request_queue *q = sdev->request_queue;
unsigned long flags;
@@ -2969,12 +2971,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
* request queue.
*/
if (q->mq_ops) {
- blk_mq_quiesce_queue(q);
+ if (wait)
+ blk_mq_quiesce_queue(q);
+ else
+ blk_mq_stop_hw_queues(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_wait_for_queuecommand(sdev);
+ if (wait)
+ scsi_wait_for_queuecommand(sdev);
}
return 0;
@@ -3036,7 +3042,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
static void
device_block(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_block(sdev);
+ scsi_internal_device_block(sdev, true);
}
static int
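
The new wait parameter decides whether scsi_internal_device_block() merely stops the queues or also drains in-flight ->queuecommand()/->queue_rq() calls, which can sleep. A sketch of the intended call pattern (hypothetical caller):

#include <scsi/scsi_device.h>

static void block_example(struct scsi_device *sdev, bool in_atomic_ctx)
{
	/*
	 * wait == true drains ongoing dispatches and may sleep;
	 * wait == false only stops the queues and is safe under a lock.
	 */
	scsi_internal_device_block(sdev, !in_atomic_ctx);
}
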
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 99bfc985e190..f11bd102d6d5 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -188,8 +188,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
*/
#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state);
#endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c7839f6c35cc..fcfeddc79331 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1783,6 +1783,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
{
int result = SCpnt->result;
unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+ unsigned int sector_size = SCpnt->device->sector_size;
+ unsigned int resid;
struct scsi_sense_hdr sshdr;
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
struct request *req = SCpnt->request;
@@ -1813,6 +1815,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
+ default:
+ /*
+ * In case of bogus fw or device, we could end up having
+ * an unaligned partial completion. Check this here and force
+ * alignment.
+ */
+ resid = scsi_get_resid(SCpnt);
+ if (resid & (sector_size - 1)) {
+ sd_printk(KERN_INFO, sdkp,
+ "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
+ resid, sector_size);
+ resid = min(scsi_bufflen(SCpnt),
+ round_up(resid, sector_size));
+ scsi_set_resid(SCpnt, resid);
+ }
}
if (result) {
@@ -3075,23 +3092,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
put_device(&sdkp->dev);
}
-struct sd_devt {
- int idx;
- struct disk_devt disk_devt;
-};
-
-static void sd_devt_release(struct disk_devt *disk_devt)
-{
- struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
- disk_devt);
-
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, sd_devt->idx);
- spin_unlock(&sd_index_lock);
-
- kfree(sd_devt);
-}
-
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@@ -3113,7 +3113,6 @@ static void sd_devt_release(struct disk_devt *disk_devt)
static int sd_probe(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
- struct sd_devt *sd_devt;
struct scsi_disk *sdkp;
struct gendisk *gd;
int index;
@@ -3139,13 +3138,9 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
- if (!sd_devt)
- goto out_free;
-
gd = alloc_disk(SD_MINORS);
if (!gd)
- goto out_free_devt;
+ goto out_free;
do {
if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3161,11 +3156,6 @@ static int sd_probe(struct device *dev)
goto out_put;
}
- atomic_set(&sd_devt->disk_devt.count, 1);
- sd_devt->disk_devt.release = sd_devt_release;
- sd_devt->idx = index;
- gd->disk_devt = &sd_devt->disk_devt;
-
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error) {
sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3205,12 +3195,11 @@ static int sd_probe(struct device *dev)
return 0;
out_free_index:
- put_disk_devt(&sd_devt->disk_devt);
- sd_devt = NULL;
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, index);
+ spin_unlock(&sd_index_lock);
out_put:
put_disk(gd);
- out_free_devt:
- kfree(sd_devt);
out_free:
kfree(sdkp);
out:
@@ -3271,7 +3260,10 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- put_disk_devt(disk->disk_devt);
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sdkp->index);
+ spin_unlock(&sd_index_lock);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
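
The new default branch in sd_done() rounds a misreported residual up to a sector boundary, clamped to the transfer length, so partial-completion accounting stays block-aligned. A sketch of the arithmetic with sample values (helper name hypothetical; sector sizes are powers of two, as round_up() requires):

#include <linux/kernel.h>	/* min(), round_up() */

/* e.g. bufflen = 4096, resid = 700, sector_size = 512 -> resid = 1024 */
static unsigned int align_resid(unsigned int resid,
				unsigned int sector_size,
				unsigned int bufflen)
{
	if (resid & (sector_size - 1))
		resid = min(bufflen, round_up(resid, sector_size));
	return resid;
}
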
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 638e5f427c90..016639d7fef1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
*/
static int storvsc_timeout = 180;
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
#endif
@@ -1383,6 +1381,22 @@ static int storvsc_do_io(struct hv_device *device,
return ret;
}
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+ /*
+ * Set blist flag to permit the reading of the VPD pages even when
+ * the target may claim SPC-2 compliance. MSFT targets currently
+ * claim SPC-2 compliance while they implement post SPC-2 features.
+ * With this flag we can correctly handle WRITE_SAME_16 issues.
+ *
+ * The hypervisor reports SCSI_UNKNOWN type for the DVD ROM device,
+ * but it still supports REPORT LUNS.
+ */
+ sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+ return 0;
+}
+
static int storvsc_device_configure(struct scsi_device *sdevice)
{
@@ -1396,14 +1410,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
sdevice->no_write_same = 1;
/*
- * Add blist flags to permit the reading of the VPD pages even when
- * the target may claim SPC-2 compliance. MSFT targets currently
- * claim SPC-2 compliance while they implement post SPC-2 features.
- * With this patch we can correctly handle WRITE_SAME_16 issues.
- */
- sdevice->sdev_bflags |= msft_blist_flags;
-
- /*
* If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
* if the device is a MSFT virtual device. If the host is
* WIN10 or newer, allow write_same.
@@ -1661,6 +1667,7 @@ static struct scsi_host_template scsi_driver = {
.eh_host_reset_handler = storvsc_host_reset_handler,
.proc_name = "storvsc_host",
.eh_timed_out = storvsc_eh_timed_out,
+ .slave_alloc = storvsc_device_alloc,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 255,
.this_id = -1,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 318e4a1f76c9..54deeb754db5 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -146,7 +146,7 @@ enum attr_idn {
/* Descriptor idn for Query requests */
enum desc_idn {
QUERY_DESC_IDN_DEVICE = 0x0,
- QUERY_DESC_IDN_CONFIGURAION = 0x1,
+ QUERY_DESC_IDN_CONFIGURATION = 0x1,
QUERY_DESC_IDN_UNIT = 0x2,
QUERY_DESC_IDN_RFU_0 = 0x3,
QUERY_DESC_IDN_INTERCONNECT = 0x4,
@@ -162,19 +162,13 @@ enum desc_header_offset {
QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
};
-enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
- QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
- QUERY_DESC_UNIT_MAX_SIZE = 0x23,
- QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
- /*
- * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
- * of descriptor header.
- */
- QUERY_DESC_STRING_MAX_SIZE = 0xFE,
- QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
- QUERY_DESC_POWER_MAX_SIZE = 0x62,
- QUERY_DESC_RFU_MAX_SIZE = 0x00,
+enum ufs_desc_def_size {
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_POWER_DEF_SIZE = 0x62,
};
/* Unit descriptor parameters offsets in bytes*/
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index dc6efbd1be8e..1359913bf840 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -100,19 +100,6 @@
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
-static u32 ufs_query_desc_max_size[] = {
- QUERY_DESC_DEVICE_MAX_SIZE,
- QUERY_DESC_CONFIGURAION_MAX_SIZE,
- QUERY_DESC_UNIT_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_INTERCONNECT_MAX_SIZE,
- QUERY_DESC_STRING_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_GEOMETRY_MAX_SIZE,
- QUERY_DESC_POWER_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
-};
-
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -2857,7 +2844,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out;
}
- if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
err = -EINVAL;
@@ -2938,6 +2925,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
}
/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -2951,42 +3024,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
static int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
- u32 param_offset,
+ u8 param_offset,
u8 *param_read_buf,
- u32 param_size)
+ u8 param_size)
{
int ret;
u8 *desc_buf;
- u32 buff_len;
+ int buff_len;
bool is_kmalloc = true;
- /* safety checks */
- if (desc_id >= QUERY_DESC_IDN_MAX)
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- buff_len = ufs_query_desc_max_size[desc_id];
- if ((param_offset + param_size) > buff_len)
- return -EINVAL;
+ /* Get the max descriptor length from the structure filled in at
+ * probe time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!param_offset && (param_size == buff_len)) {
- /* memory space already available to hold full descriptor */
- desc_buf = param_read_buf;
- is_kmalloc = false;
- } else {
- /* allocate memory to hold full descriptor */
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
desc_buf = kmalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
}
+ /* Request for full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0, desc_buf,
- &buff_len);
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
__func__, desc_id, desc_index, param_offset, ret);
-
goto out;
}
@@ -2998,25 +3078,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
- /*
- * While reading variable size descriptors (like string descriptor),
- * some UFS devices may report the "LENGTH" (field in "Transaction
- * Specific fields" of Query Response UPIU) same as what was requested
- * in Query Request UPIU instead of reporting the actual size of the
- * variable size descriptor.
- * Although it's safe to ignore the "LENGTH" field for variable size
- * descriptors as we can always derive the length of the descriptor from
- * the descriptor header fields. Hence this change impose the length
- * match check only for fixed size descriptors (for which we always
- * request the correct size as part of Query Request UPIU).
- */
- if ((desc_id != QUERY_DESC_IDN_STRING) &&
- (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
- dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
- __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
- ret = -EINVAL;
- goto out;
- }
+ /* Check that we will not copy more data than is available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -5919,8 +5983,8 @@ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
- int buff_len = QUERY_DESC_POWER_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 desc_buf[hba->desc_size.pwr_desc];
ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
if (ret) {
@@ -6017,11 +6081,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
{
int err;
u8 model_index;
- u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
+ u8 desc_buf[hba->desc_size.dev_desc];
- err = ufshcd_read_device_desc(hba, desc_buf,
- QUERY_DESC_DEVICE_MAX_SIZE);
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -6038,14 +6101,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
- QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+ QUERY_DESC_MAX_SIZE, ASCII_STD);
if (err) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
goto out;
}
- str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+ str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
@@ -6251,6 +6314,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
hba->req_abort_count = 0;
}
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -6285,6 +6393,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
ret = ufs_get_device_desc(hba, &card);
if (ret) {
dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
@@ -6320,6 +6431,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -7774,6 +7886,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
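
With ufshcd_def_desc_sizes() seeding spec defaults at init and ufshcd_init_desc_sizes() overwriting them with device-reported lengths at probe, readers size their buffers through the mapping helper rather than a fixed table. A sketch of such a consumer (error handling trimmed; assumes it lives in ufshcd.c next to the static ufshcd_read_desc_param()):

static int read_device_desc_example(struct ufs_hba *hba, u8 *buf, int bufsz)
{
	int len;

	if (ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_DEVICE, &len))
		return -EINVAL;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0,
				      buf, min(len, bufsz));
}
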
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7630600217a2..cdc8bd05f7df 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -220,6 +220,15 @@ struct ufs_dev_cmd {
struct ufs_query query;
};
+struct ufs_desc_size {
+ int dev_desc;
+ int pwr_desc;
+ int geom_desc;
+ int interc_desc;
+ int unit_desc;
+ int conf_desc;
+};
+
/**
* struct ufs_clk_info - UFS clock related info
* @list: list headed by hba->clk_list_head
@@ -483,6 +492,7 @@ struct ufs_stats {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
+ * @desc_size: descriptor sizes reported by device
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
@@ -666,6 +676,7 @@ struct ufs_hba {
bool is_urgent_bkops_lvl_checked;
struct rw_semaphore clk_scaling_lock;
+ struct ufs_desc_size desc_size;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int *desc_length);
+
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ef474a748744..c374e3b5c678 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
irq_flag &= ~PCI_IRQ_MSI;
error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
- if (error)
+ if (error < 0)
goto out_reset_adapter;
adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
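
pci_alloc_irq_vectors() returns the number of vectors allocated (>= 1) on success and a negative errno on failure, so the earlier `if (error)` treated every successful allocation as a failure. A sketch of the corrected pattern:

#include <linux/pci.h>

static int request_vectors_example(struct pci_dev *pdev, unsigned int flags)
{
	/* >= 1 vectors on success, -errno on failure */
	int nvec = pci_alloc_irq_vectors(pdev, 1, 1, flags);

	return nvec < 0 ? nvec : 0;
}
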
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index b7b87ecefcdf..9fca8d225ee0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
newsock->ops = sock->ops;
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
if (rc == -EAGAIN) {
/* Nothing ready, so wait for activity */
init_waitqueue_entry(&wait, current);
@@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
set_current_state(TASK_INTERRUPTIBLE);
schedule();
remove_wait_queue(sk_sleep(sock->sk), &wait);
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
}
if (rc)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 7f8cf875157c..65a285631994 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -336,7 +336,6 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
cvm_oct_device[port])) {
struct net_device *dev = cvm_oct_device[port];
- struct octeon_ethernet *priv = netdev_priv(dev);
/*
* Only accept packets for devices that are
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index e61e4ca064a8..74094fff4367 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,6 +1,7 @@
config BCM2835_VCHIQ
tristate "Videocore VCHIQ"
depends on HAS_DMA
+ depends on OF
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
default y
help
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index f5e330099bfc..fd7c16a7ca6e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -43,7 +43,7 @@
#include "target_core_ua.h"
static sense_reason_t core_alua_check_transition(int state, int valid,
- int *primary);
+ int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
struct se_lun *lun, int explicit, int offline);
@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric
* access state.
*/
- rc = core_alua_check_transition(alua_access_state,
- valid_states, &primary);
+ rc = core_alua_check_transition(alua_access_state, valid_states,
+ &primary, 1);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
/*
@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
-core_alua_check_transition(int state, int valid, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
/*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary)
*primary = 0;
break;
case ALUA_ACCESS_STATE_TRANSITION:
- /*
- * Transitioning is set internally, and
- * cannot be selected manually.
- */
- goto not_supported;
+ if (!(valid & ALUA_T_SUP) || explicit)
+ /*
+ * Transitioning is set internally and by tcmu daemon,
+ * and cannot be selected through a STPG.
+ */
+ goto not_supported;
+ *primary = 0;
+ break;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST;
@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
- struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
return 0;
- if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
return -EAGAIN;
/*
* Flush any pending transitions
*/
- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
- ALUA_ACCESS_STATE_TRANSITION) {
- /* Just in case */
- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
- tg_pt_gp->tg_pt_gp_transition_complete = &wait;
- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
- wait_for_completion(&wait);
- tg_pt_gp->tg_pt_gp_transition_complete = NULL;
- return 0;
- }
+ if (!explicit)
+ flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
- tg_pt_gp->tg_pt_gp_alua_previous_state =
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt(
core_alua_queue_state_change_ua(tg_pt_gp);
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ return 0;
+
+ tg_pt_gp->tg_pt_gp_alua_previous_state =
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
/*
* Check for the optional ALUA primary state transition delay
*/
@@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt(
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
- unsigned long transition_tmo;
-
- transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
- &tg_pt_gp->tg_pt_gp_transition_work,
- transition_tmo);
- } else {
+ schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ if (explicit) {
tg_pt_gp->tg_pt_gp_transition_complete = &wait;
- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
- &tg_pt_gp->tg_pt_gp_transition_work, 0);
wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL;
}
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *tg_pt_gp;
int primary, valid_states, rc = 0;
+ if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ return -ENODEV;
+
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
- if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+ if (core_alua_check_transition(new_state, valid_states, &primary,
+ explicit) != 0)
return -EINVAL;
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
- INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
- core_alua_do_transition_tg_pt_work);
+ INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+ core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev)
{
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+ if (!(dev->transport->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
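
The ALUA transition machinery above drops the delayed work item in favor of a plain work_struct: implicit transitions are fire-and-forget, while explicit STPGs wait on a completion signalled by the work function. A minimal sketch of that pattern (names hypothetical; assumes INIT_WORK() was done at allocation, as in core_alua_allocate_tg_pt_gp()):

#include <linux/workqueue.h>
#include <linux/completion.h>

struct transition_ctx {
	struct work_struct work;
	struct completion *done;	/* non-NULL only for explicit */
};

static void transition_work_fn(struct work_struct *work)
{
	struct transition_ctx *ctx =
		container_of(work, struct transition_ctx, work);

	/* ... perform the state change ... */
	if (ctx->done)
		complete(ctx->done);
}

static void start_transition(struct transition_ctx *ctx, bool explicit)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	ctx->done = explicit ? &wait : NULL;
	schedule_work(&ctx->work);
	if (explicit)
		wait_for_completion(&wait);
}
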
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 54b36c9835be..38b5025e4c7a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->aborted_task()\n");
return -EINVAL;
}
+ if (!tfo->check_stop_free) {
+ pr_err("Missing tfo->check_stop_free()\n");
+ return -EINVAL;
+ }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index a8f8e53f2f57..94cda7991e80 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
sd->lun, sd->queue_depth);
}
- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
return 0;
}
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
- dump_stack();
return 0;
}
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags = TRANSPORT_FLAG_PASSTHROUGH |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
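
min_not_zero() returns the smaller of its arguments but treats zero as "unset", which is what makes it fit for hardware limits that a non-disk device may report as 0. A tiny sketch of its behavior:

#include <linux/kernel.h>

/* min_not_zero(0, 512) == 512; min_not_zero(256, 512) == 256 */
static int pick_block_size(int reported)
{
	return min_not_zero(reported, 512);
}
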
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 68d8aef7ab78..c194063f169b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
break;
case VERIFY:
+ case VERIFY_16:
size = 0;
- sectors = transport_get_sectors_10(cdb);
- cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[0] == VERIFY) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
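
Per SBC, VERIFY(10) carries a 32-bit LBA at byte 2 and a 16-bit verification length at byte 7, while VERIFY(16) carries a 64-bit LBA at byte 2 and a 32-bit length at byte 10; that is what the transport_get_sectors_*/transport_lba_* pairs above decode. A sketch of the raw field extraction (hypothetical helper):

#include <asm/unaligned.h>

static void parse_verify_cdb(const u8 *cdb, u64 *lba, u32 *sectors)
{
	if (cdb[0] == VERIFY) {			/* VERIFY(10), 0x2f */
		*lba = get_unaligned_be32(&cdb[2]);
		*sectors = get_unaligned_be16(&cdb[7]);
	} else {				/* VERIFY_16, 0x8f */
		*lba = get_unaligned_be64(&cdb[2]);
		*sectors = get_unaligned_be32(&cdb[10]);
	}
}
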
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c0dbfa016575..6fb191914f45 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
if (ret)
goto out_kill_ref;
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+ if (!(dev->transport->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 434d9d693989..b1a3cdb29468 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
* Fabric modules are expected to return '1' here if the se_cmd being
* passed is released at this point, or zero if not being released.
*/
- return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
- : 0;
+ return cmd->se_tfo->check_stop_free(cmd);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c3adefe95e50..c6874c38a10b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,7 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
+#include <linux/configfs.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
spinlock_t commands_lock;
struct timer_list timeout;
+ unsigned int cmd_time_out;
char dev_config[TCMU_CONFIG_LEN];
};
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+ if (udev->cmd_time_out)
+ tcmu_cmd->deadline = jiffies +
+ msecs_to_jiffies(udev->cmd_time_out);
idr_preload(GFP_KERNEL);
spin_lock_irq(&udev->commands_lock);
@@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
pr_debug("sleeping for ring space\n");
spin_unlock_irq(&udev->cmdr_lock);
- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+ if (udev->cmd_time_out)
+ ret = schedule_timeout(
+ msecs_to_jiffies(udev->cmd_time_out));
+ else
+ ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
- mod_timer(&udev->timeout,
- round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+ if (udev->cmd_time_out)
+ mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+ msecs_to_jiffies(udev->cmd_time_out)));
return TCM_NO_SENSE;
}
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
}
udev->hba = hba;
+ udev->cmd_time_out = TCMU_TIME_OUT;
init_waitqueue_head(&udev->wait_cmdr);
spin_lock_init(&udev->cmdr_lock);
@@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev)
if (dev->dev_attrib.hw_block_size == 0)
dev->dev_attrib.hw_block_size = 512;
/* Other attributes can be configured in userspace */
- dev->dev_attrib.hw_max_sectors = 128;
+ if (!dev->dev_attrib.hw_max_sectors)
+ dev->dev_attrib.hw_max_sectors = 128;
dev->dev_attrib.hw_queue_depth = 128;
ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
kfree(udev);
}
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+ return udev->uio_info.uio_dev ? true : false;
+}
+
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
spin_unlock_irq(&udev->commands_lock);
WARN_ON(!all_expired);
- /* Device was configured */
- if (udev->uio_info.uio_dev) {
+ if (tcmu_dev_configured(udev)) {
tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
udev->uio_info.uio_dev->minor);
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
}
enum {
- Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+ Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+ Opt_err,
};
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%u"},
{Opt_err, NULL}
};
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+ unsigned long tmp_ul;
+ char *arg_p;
+ int ret;
+
+ arg_p = match_strdup(arg);
+ if (!arg_p)
+ return -ENOMEM;
+
+ ret = kstrtoul(arg_p, 0, &tmp_ul);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for dev attrib\n");
+ return ret;
+ }
+ if (!tmp_ul) {
+ pr_err("dev attrib must be nonzero\n");
+ return -EINVAL;
+ }
+ *dev_attrib = tmp_ul;
+ return 0;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
char *orig, *ptr, *opts, *arg_p;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
- unsigned long tmp_ul;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_err("kstrtoul() failed for dev_size=\n");
break;
case Opt_hw_block_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoul() failed for hw_block_size=\n");
- break;
- }
- if (!tmp_ul) {
- pr_err("hw_block_size must be nonzero\n");
- break;
- }
- dev->dev_attrib.hw_block_size = tmp_ul;
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_block_size));
+ break;
+ case Opt_hw_max_sectors:
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_max_sectors));
break;
default:
break;
}
+
+ if (ret)
+ break;
}
kfree(orig);
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
-static const struct target_backend_ops tcmu_ops = {
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = container_of(da->da_dev,
+ struct tcmu_dev, se_dev);
+
+ return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = container_of(da->da_dev,
+ struct tcmu_dev, se_dev);
+ u32 val;
+ int ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (!val) {
+ pr_err("Illegal value for cmd_time_out\n");
+ return -EINVAL;
+ }
+
+ udev->cmd_time_out = val * MSEC_PER_SEC;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = tcmu_get_blocks,
- .tb_dev_attrib_attrs = passthrough_attrib_attrs,
+ .tb_dev_attrib_attrs = NULL,
};
static int __init tcmu_module_init(void)
{
- int ret;
+ int ret, i, len = 0;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ len += sizeof(struct configfs_attribute *);
+ }
+ len += sizeof(struct configfs_attribute *) * 2;
+
+ tcmu_attrs = kzalloc(len, GFP_KERNEL);
+ if (!tcmu_attrs) {
+ ret = -ENOMEM;
+ goto out_unreg_genl;
+ }
+
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ tcmu_attrs[i] = passthrough_attrib_attrs[i];
+ }
+ tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+ tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
+
ret = transport_backend_register(&tcmu_ops);
if (ret)
- goto out_unreg_genl;
+ goto out_attrs;
return 0;
+out_attrs:
+ kfree(tcmu_attrs);
out_unreg_genl:
genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
@@ -1194,6 +1287,7 @@ out_free_cache:
static void __exit tcmu_module_exit(void)
{
target_backend_unregister(&tcmu_ops);
+ kfree(tcmu_attrs);
genl_unregister_family(&tcmu_genl_family);
root_device_unregister(tcmu_root_device);
kmem_cache_destroy(tcmu_cmd_cache);
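
tcmu_module_init() above sizes a new array for every entry of the NULL-terminated passthrough_attrib_attrs, one extra slot for cmd_time_out, and one for the terminating NULL, then copies the originals across. A generic sketch of that clone-and-append pattern:

#include <linux/slab.h>
#include <linux/string.h>

/* Copy a NULL-terminated pointer array, appending one entry plus NULL. */
static void **clone_and_append(void **src, void *extra)
{
	size_t n = 0;
	void **dst;

	while (src[n])
		n++;

	dst = kcalloc(n + 2, sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst, src, n * sizeof(*dst));
	dst[n] = extra;		/* dst[n + 1] stays NULL */
	return dst;
}
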
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 1bacbc3b19a0..e94aea8c0d05 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -114,7 +114,7 @@
#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
- struct n_hdlc_buf *link;
+ struct list_head list_item;
int count;
char buf[1];
};
@@ -122,8 +122,7 @@ struct n_hdlc_buf {
#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
struct n_hdlc_buf_list {
- struct n_hdlc_buf *head;
- struct n_hdlc_buf *tail;
+ struct list_head list;
int count;
spinlock_t spinlock;
};
@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
* @backup_tty - TTY to use if tty gets closed
* @tbusy - reentrancy flag for tx wakeup code
* @woke_up - FIXME: describe this field
- * @tbuf - currently transmitting tx buffer
* @tx_buf_list - list of pending transmit frame buffers
* @rx_buf_list - list of received frame buffers
* @tx_free_buf_list - list unused transmit frame buffers
@@ -149,7 +147,6 @@ struct n_hdlc {
struct tty_struct *backup_tty;
int tbusy;
int woke_up;
- struct n_hdlc_buf *tbuf;
struct n_hdlc_buf_list tx_buf_list;
struct n_hdlc_buf_list rx_buf_list;
struct n_hdlc_buf_list tx_free_buf_list;
@@ -159,6 +156,8 @@ struct n_hdlc {
/*
* HDLC buffer list manipulation functions
*/
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+ struct n_hdlc_buf *buf);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
struct n_hdlc_buf *buf;
- unsigned long flags;
while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
- if (n_hdlc->tbuf) {
- n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
- n_hdlc->tbuf = NULL;
- }
- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
}
static struct tty_ldisc_ops n_hdlc_ldisc = {
@@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
} else
break;
}
- kfree(n_hdlc->tbuf);
kfree(n_hdlc);
} /* end of n_hdlc_release() */
@@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
n_hdlc->woke_up = 0;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
- /* get current transmit buffer or get new transmit */
- /* buffer from list of pending transmit buffers */
-
- tbuf = n_hdlc->tbuf;
- if (!tbuf)
- tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
-
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
while (tbuf) {
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)sending frame %p, count=%d\n",
@@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
/* rollback was possible and has been done */
if (actual == -ERESTARTSYS) {
- n_hdlc->tbuf = tbuf;
+ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
/* if transmit error, throw frame away by */
@@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
/* free current transmit buffer */
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
-
- /* this tx buffer is done */
- n_hdlc->tbuf = NULL;
-
+
/* wake up sleeping writers */
wake_up_interruptible(&tty->write_wait);
@@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)frame %p pending\n",
__FILE__,__LINE__,tbuf);
-
- /* buffer not accepted by driver */
- /* set this buffer as pending buffer */
- n_hdlc->tbuf = tbuf;
+
+ /*
+ * the buffer was not accepted by the driver,
+ * so return it to the tx queue
+ */
+ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
}
@@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
int error = 0;
int count;
unsigned long flags;
-
+ struct n_hdlc_buf *buf = NULL;
+
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
__FILE__,__LINE__,cmd);
@@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
/* report count of read data available */
/* in next available frame (if any) */
spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
- if (n_hdlc->rx_buf_list.head)
- count = n_hdlc->rx_buf_list.head->count;
+ buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
+ struct n_hdlc_buf, list_item);
+ if (buf)
+ count = buf->count;
else
count = 0;
spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
@@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
count = tty_chars_in_buffer(tty);
/* add size of next output frame in queue */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
- if (n_hdlc->tx_buf_list.head)
- count += n_hdlc->tx_buf_list.head->count;
+ buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
+ struct n_hdlc_buf, list_item);
+ if (buf)
+ count += buf->count;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
error = put_user(count, (int __user *)arg);
break;
@@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_wait(filp, &tty->write_wait, wait);
/* set bits for operations that won't block */
- if (n_hdlc->rx_buf_list.head)
+ if (!list_empty(&n_hdlc->rx_buf_list.list))
mask |= POLLIN | POLLRDNORM; /* readable */
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= POLLHUP;
if (tty_hung_up_p(filp))
mask |= POLLHUP;
if (!tty_is_writelocked(tty) &&
- n_hdlc->tx_free_buf_list.head)
+ !list_empty(&n_hdlc->tx_free_buf_list.list))
mask |= POLLOUT | POLLWRNORM; /* writable */
}
return mask;
@@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void)
spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
-
+
+ INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
+
/* allocate free rx buffer list */
for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
@@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
} /* end of n_hdlc_alloc() */
/**
+ * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
+ * @buf_list - pointer to the buffer list
+ * @buf - pointer to the buffer
+ */
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+ struct n_hdlc_buf *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ list_add(&buf->list_item, &buf_list->list);
+ buf_list->count++;
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
+}
+
+/**
* n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
- * @list - pointer to buffer list
+ * @buf_list - pointer to buffer list
* @buf - pointer to buffer
*/
-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf)
{
unsigned long flags;
- spin_lock_irqsave(&list->spinlock,flags);
-
- buf->link=NULL;
- if (list->tail)
- list->tail->link = buf;
- else
- list->head = buf;
- list->tail = buf;
- (list->count)++;
-
- spin_unlock_irqrestore(&list->spinlock,flags);
-
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ list_add_tail(&buf->list_item, &buf_list->list);
+ buf_list->count++;
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
} /* end of n_hdlc_buf_put() */
/**
* n_hdlc_buf_get - remove and return an HDLC buffer from list
- * @list - pointer to HDLC buffer list
+ * @buf_list - pointer to HDLC buffer list
*
* Remove and return an HDLC buffer from the head of the specified HDLC buffer
* list.
* Returns a pointer to HDLC buffer if available, otherwise %NULL.
*/
-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
+static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
{
unsigned long flags;
struct n_hdlc_buf *buf;
- spin_lock_irqsave(&list->spinlock,flags);
-
- buf = list->head;
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ buf = list_first_entry_or_null(&buf_list->list,
+ struct n_hdlc_buf, list_item);
if (buf) {
- list->head = buf->link;
- (list->count)--;
+ list_del(&buf->list_item);
+ buf_list->count--;
}
- if (!list->head)
- list->tail = NULL;
-
- spin_unlock_irqrestore(&list->spinlock,flags);
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
return buf;
-
} /* end of n_hdlc_buf_get() */
static char hdlc_banner[] __initdata =
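
The n_hdlc conversion above replaces hand-rolled head/tail pointers with struct list_head, which removes the tail bookkeeping and makes "retry this frame first" a plain list_add() at the head, the operation the deleted tbuf pointer used to emulate. A hedged kernel-style sketch of the three primitives the driver now relies on (struct and function names are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct frame {
    struct list_head list_item;
    int count;
};

struct frame_list {
    struct list_head list;
    int count;
    spinlock_t spinlock;
};

/* Append at the tail: normal queuing (cf. n_hdlc_buf_put). */
static void frame_put(struct frame_list *fl, struct frame *f)
{
    unsigned long flags;

    spin_lock_irqsave(&fl->spinlock, flags);
    list_add_tail(&f->list_item, &fl->list);
    fl->count++;
    spin_unlock_irqrestore(&fl->spinlock, flags);
}

/* Push back at the head so a rejected frame is retried first
 * (cf. n_hdlc_buf_return). */
static void frame_return(struct frame_list *fl, struct frame *f)
{
    unsigned long flags;

    spin_lock_irqsave(&fl->spinlock, flags);
    list_add(&f->list_item, &fl->list);
    fl->count++;
    spin_unlock_irqrestore(&fl->spinlock, flags);
}

/* Pop from the head, or NULL when empty (cf. n_hdlc_buf_get). */
static struct frame *frame_get(struct frame_list *fl)
{
    unsigned long flags;
    struct frame *f;

    spin_lock_irqsave(&fl->spinlock, flags);
    f = list_first_entry_or_null(&fl->list, struct frame, list_item);
    if (f) {
        list_del(&f->list_item);
        fl->count--;
    }
    spin_unlock_irqrestore(&fl->spinlock, flags);
    return f;
}
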
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index b4f86c219db1..7a17aedbf902 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1031,8 +1031,10 @@ static int s3c64xx_serial_startup(struct uart_port *port)
if (ourport->dma) {
ret = s3c24xx_serial_request_dma(ourport);
if (ret < 0) {
- dev_warn(port->dev, "DMA request failed\n");
- return ret;
+ dev_warn(port->dev,
+ "DMA request failed, DMA will not be used\n");
+ devm_kfree(port->dev, ourport->dma);
+ ourport->dma = NULL;
}
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 2092e46b1380..f8d0747810e7 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -250,6 +250,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
val = dwc3_omap_read_utmi_ctrl(omap);
val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
dwc3_omap_write_utmi_ctrl(omap, val);
+ break;
case OMAP_DWC3_VBUS_OFF:
val = dwc3_omap_read_utmi_ctrl(omap);
@@ -392,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
{
u32 reg;
struct device_node *node = omap->dev->of_node;
- int utmi_mode = 0;
+ u32 utmi_mode = 0;
reg = dwc3_omap_read_utmi_ctrl(omap);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4db97ecae885..0d75158e43fe 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1342,6 +1342,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number, true);
+
+ /*
+ * If request was already started, this means we had to
+ * stop the transfer. With that we also need to ignore
+ * all TRBs used by the request, however TRBs can only
+ * be modified after completion of END_TRANSFER
+ * command. So what we do here is that we wait for
+ * END_TRANSFER completion and only after that, we jump
+ * over TRBs by clearing HWO and incrementing dequeue
+ * pointer.
+ *
+ * Note that we have 2 possible types of transfers here:
+ *
+ * i) Linear buffer request
+ * ii) SG-list based request
+ *
+ * SG-list based requests will have r->num_pending_sgs
+ * set to a valid number (> 0). Linear requests
+ * normally use a single TRB.
+ *
+ * For each of these two cases, if r->unaligned flag is
+ * set, one extra TRB has been used to align transfer
+ * size to wMaxPacketSize.
+ *
+ * All of these cases need to be taken into
+ * consideration so we don't mess up our TRB ring
+ * pointers.
+ */
+ wait_event_lock_irq(dep->wait_end_transfer,
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+ dwc->lock);
+
+ if (!r->trb)
+ goto out1;
+
+ if (r->num_pending_sgs) {
+ struct dwc3_trb *trb;
+ int i = 0;
+
+ for (i = 0; i < r->num_pending_sgs; i++) {
+ trb = r->trb + i;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+
+ if (r->unaligned) {
+ trb = r->trb + r->num_pending_sgs + 1;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+ } else {
+ struct dwc3_trb *trb = r->trb;
+
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+
+ if (r->unaligned) {
+ trb = r->trb + 1;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+ }
goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
@@ -1352,6 +1414,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
out1:
/* giveback the request */
+ dep->queued_requests--;
dwc3_gadget_giveback(dep, req, -ECONNRESET);
out0:
@@ -2126,12 +2189,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
return 1;
}
- if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
- return 1;
-
count = trb->size & DWC3_TRB_SIZE_MASK;
req->remaining += count;
+ if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ return 1;
+
if (dep->direction) {
if (count) {
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
@@ -3228,15 +3291,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
- int ret;
-
if (!dwc->gadget_driver)
return 0;
- ret = dwc3_gadget_run_stop(dwc, false, false);
- if (ret < 0)
- return ret;
-
+ dwc3_gadget_run_stop(dwc, false, false);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
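
The dequeue path added earlier in this gadget.c hunk sleeps under dwc->lock with wait_event_lock_irq() until the END_TRANSFER command completes before touching any TRB. That macro releases the given spinlock around the sleep and retakes it before re-evaluating the condition, so the completion side only has to clear the flag and wake the queue under the same lock. A hedged sketch of the pairing, with hypothetical names modeled on the driver:

#include <linux/wait.h>
#include <linux/spinlock.h>

#define EP_END_TRANSFER_PENDING 0x1  /* cf. DWC3_EP_END_TRANSFER_PENDING */

struct ep_state {
    spinlock_t lock;                 /* stands in for dwc->lock */
    unsigned int flags;
    wait_queue_head_t wait_end_transfer;
};

/* Caller holds ep->lock with IRQs disabled; wait_event_lock_irq()
 * drops it around the sleep and retakes it before each recheck. */
static void wait_for_end_transfer(struct ep_state *ep)
{
    wait_event_lock_irq(ep->wait_end_transfer,
                        !(ep->flags & EP_END_TRANSFER_PENDING),
                        ep->lock);
}

/* Completion side: clear the flag and wake sleepers under the lock. */
static void end_transfer_done(struct ep_state *ep)
{
    unsigned long flags;

    spin_lock_irqsave(&ep->lock, flags);
    ep->flags &= ~EP_END_TRANSFER_PENDING;
    wake_up(&ep->wait_end_transfer);
    spin_unlock_irqrestore(&ep->lock, flags);
}
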
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 3129bcf74d7d..265e223ab645 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -28,23 +28,23 @@ struct dwc3;
#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
/* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
#define DWC3_DEPCFG_BULK_BASED (1 << 30)
#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
/* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
/* This applies for core versions earlier than 1.94a */
#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
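
The gadget.h hunk masks each DEPCFG argument to its field width before shifting, so an oversized value can no longer spill into neighbouring bits of the register word. A small runnable illustration of the difference, borrowing the 5-bit endpoint-number field at bits 29:25:

#include <stdio.h>
#include <stdint.h>

/* Unmasked: an oversized argument corrupts neighbouring fields. */
#define EP_NUMBER_RAW(n)   ((uint32_t)(n) << 25)
/* Masked to the 5-bit field, as DWC3_DEPCFG_EP_NUMBER() now does. */
#define EP_NUMBER_SAFE(n)  (((uint32_t)(n) & 0x1f) << 25)

int main(void)
{
    uint32_t bogus = 0x47;  /* wider than 5 bits */

    printf("raw : 0x%08x\n", EP_NUMBER_RAW(bogus));  /* sets bit 31 too */
    printf("safe: 0x%08x\n", EP_NUMBER_SAFE(bogus)); /* stays in 29:25 */
    return 0;
}
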
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 78c44979dde3..cbff3b02840d 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
ret = unregister_gadget(gi);
if (ret)
goto err;
+ kfree(name);
} else {
if (gi->composite.gadget_driver.udc_name) {
ret = -EBUSY;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a5b7cd615698..a0085571824d 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1834,11 +1834,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
spin_lock_irqsave(&func->ffs->eps_lock, flags);
while(count--) {
struct usb_endpoint_descriptor *ds;
+ struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
+ int needs_comp_desc = false;
int desc_idx;
- if (ffs->gadget->speed == USB_SPEED_SUPER)
+ if (ffs->gadget->speed == USB_SPEED_SUPER) {
desc_idx = 2;
- else if (ffs->gadget->speed == USB_SPEED_HIGH)
+ needs_comp_desc = true;
+ } else if (ffs->gadget->speed == USB_SPEED_HIGH)
desc_idx = 1;
else
desc_idx = 0;
@@ -1855,6 +1858,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
ep->ep->driver_data = ep;
ep->ep->desc = ds;
+
+ comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
+ USB_DT_ENDPOINT_SIZE);
+ ep->ep->maxburst = comp_desc->bMaxBurst + 1;
+
+ if (needs_comp_desc)
+ ep->ep->comp_desc = comp_desc;
+
ret = usb_ep_enable(ep->ep);
if (likely(!ret)) {
epfile->ep = ep;
@@ -2253,7 +2264,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- d->Reserved1)
+ !d->Reserved1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 27ed51b5082f..29b41b5dee04 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
v4l2_event_queue(&uvc->vdev, &v4l2_event);
- /* Pass additional setup data to userspace */
- if (uvc->event_setup_out && uvc->event_length) {
- uvc->control_req->length = uvc->event_length;
- return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
- uvc->control_req, GFP_ATOMIC);
- }
-
return 0;
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index a2615d64d07c..a2c916869293 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *);
/* /dev/gadget/$CHIP represents ep0 and the whole device */
enum ep0_state {
- /* DISBLED is the initial state.
- */
+ /* DISABLED is the initial state. */
STATE_DEV_DISABLED = 0,
/* Only one open() of /dev/gadget/$CHIP; only one file tracks
@@ -1782,8 +1781,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
spin_lock_irq (&dev->lock);
value = -EINVAL;
- if (dev->buf)
+ if (dev->buf) {
+ kfree(kbuf);
goto fail;
+ }
dev->buf = kbuf;
/* full or low speed config */
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 11bbce28bc23..2035906b8ced 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -610,7 +610,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
- unsigned long flags, ept_cfg, maxpacket;
+ unsigned long flags, maxpacket;
unsigned int nr_trans;
DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
@@ -630,7 +630,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
ep->is_in = 0;
DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n",
- ep->ep.name, ept_cfg, maxpacket);
+ ep->ep.name, ep->ept_cfg, maxpacket);
if (usb_endpoint_dir_in(desc)) {
ep->is_in = 1;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index c60abe3a68f9..8cabc5944d5f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev)
int rc;
dum = *((void **)dev_get_platdata(&pdev->dev));
+ /* Clear usb_gadget region for new registration to udc-core */
+ memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
dum->gadget.max_speed = USB_SPEED_SUPER;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 85504419ab31..3828c2ec8623 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep)
*/
while (!list_empty(&ep->queue)) {
struct net2280_request *req;
- u32 tmp;
+ u32 req_dma_count;
req = list_entry(ep->queue.next,
struct net2280_request, queue);
if (!req->valid)
break;
rmb();
- tmp = le32_to_cpup(&req->td->dmacount);
- if ((tmp & BIT(VALID_BIT)) != 0)
+ req_dma_count = le32_to_cpup(&req->td->dmacount);
+ if ((req_dma_count & BIT(VALID_BIT)) != 0)
break;
/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
@@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep)
*/
if (unlikely(req->td->dmadesc == 0)) {
/* paranoia */
- tmp = readl(&ep->dma->dmacount);
- if (tmp & DMA_BYTE_COUNT_MASK)
+ u32 const ep_dmacount = readl(&ep->dma->dmacount);
+
+ if (ep_dmacount & DMA_BYTE_COUNT_MASK)
break;
/* single transfer mode */
- dma_done(ep, req, tmp, 0);
+ dma_done(ep, req, req_dma_count, 0);
num_completed++;
break;
} else if (!ep->is_in &&
(req->req.length % ep->ep.maxpacket) &&
!(ep->dev->quirks & PLX_PCIE)) {
- tmp = readl(&ep->regs->ep_stat);
+ u32 const ep_stat = readl(&ep->regs->ep_stat);
/* AVOID TROUBLE HERE by not issuing short reads from
* your gadget driver. That helps avoid errata 0121,
* 0122, and 0124; not all cases trigger the warning.
*/
- if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
+ if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
ep_warn(ep->dev, "%s lost packet sync!\n",
ep->ep.name);
req->req.status = -EOVERFLOW;
} else {
- tmp = readl(&ep->regs->ep_avail);
- if (tmp) {
+ u32 const ep_avail = readl(&ep->regs->ep_avail);
+ if (ep_avail) {
/* fifo gets flushed later */
ep->out_overflow = 1;
ep_dbg(ep->dev,
"%s dma, discard %d len %d\n",
- ep->ep.name, tmp,
+ ep->ep.name, ep_avail,
req->req.length);
req->req.status = -EOVERFLOW;
}
}
}
- dma_done(ep, req, tmp, 0);
+ dma_done(ep, req, req_dma_count, 0);
num_completed++;
}
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index e1335ad5bce9..832c4fdbe985 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev)
usb_del_gadget_udc(&udc->gadget);
pxa_cleanup_debugfs(udc);
- if (!IS_ERR_OR_NULL(udc->transceiver))
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
- usb_put_phy(udc->transceiver);
+ usb_put_phy(udc->transceiver);
+ }
udc->transceiver = NULL;
the_controller = NULL;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 414e3c376dbb..5302f988e7e6 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -350,7 +350,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
- if (valid_port(wIndex)) {
+ if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
ohci_at91_port_suspend(ohci_at91->sfr_regmap,
1);
return 0;
@@ -393,7 +393,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
- if (valid_port(wIndex)) {
+ if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
ohci_at91_port_suspend(ohci_at91->sfr_regmap,
0);
return 0;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 363d125300ea..2b4a00fa735d 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -109,7 +109,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
/* xhci 1.1 controllers have the HCCPARAMS2 register */
- if (hci_version > 100) {
+ if (hci_version > 0x100) {
temp = readl(&xhci->cap_regs->hcc_params2);
xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
xhci_dbg(xhci, " HC %s Force save context capability",
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 9066ec9e0c2e..67d5dc79b6b5 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -382,7 +382,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
static int xhci_mtk_setup(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
- .extra_priv_size = sizeof(struct xhci_hcd),
.reset = xhci_mtk_setup,
};
@@ -678,13 +677,13 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto power_off_phys;
}
- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
- xhci->shared_hcd->can_do_streams = 1;
-
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto dealloc_usb2_hcd;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6d33b42ffcf5..bd02a6cd8e2c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -286,6 +286,8 @@ static int xhci_plat_remove(struct platform_device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
+
usb_remove_hcd(xhci->shared_hcd);
usb_phy_shutdown(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index a59fafb4b329..74436f8ca538 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1308,7 +1308,6 @@ static int tegra_xhci_setup(struct usb_hcd *hcd)
}
static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
- .extra_priv_size = sizeof(struct xhci_hcd),
.reset = tegra_xhci_setup,
};
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6d6c46000e56..50aee8b7718b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
spin_lock_irqsave(&xhci->lock, flags);
- /* disble usb3 ports Wake bits*/
+ /* disable usb3 ports Wake bits */
port_index = xhci->num_usb3_ports;
port_array = xhci->usb3_ports;
while (port_index--) {
@@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
writel(t2, port_array[port_index]);
}
- /* disble usb2 ports Wake bits*/
+ /* disable usb2 ports Wake bits */
port_index = xhci->num_usb2_ports;
port_array = xhci->usb2_ports;
while (port_index--) {
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 095778ff984d..37c63cb39714 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface,
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
- if (iface_desc->desc.bNumEndpoints < 1) {
- dev_err(&interface->dev, "Invalid number of endpoints\n");
- retval = -EINVAL;
- goto error;
- }
-
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
@@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface,
/* this one will match for the IOWarrior56 only */
dev->int_out_endpoint = endpoint;
}
+
+ if (!dev->int_in_endpoint) {
+ dev_err(&interface->dev, "no interrupt-in endpoint found\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+ if (!dev->int_out_endpoint) {
+ dev_err(&interface->dev, "no interrupt-out endpoint found\n");
+ retval = -ENODEV;
+ goto error;
+ }
+ }
+
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
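
The iowarrior fix moves the sanity checking to after the endpoint discovery loop: the removed bNumEndpoints test was not enough, since an endpoint can exist without being the required interrupt-in one, and the interrupt-out endpoint is mandatory only for the IOW56. A hedged sketch of the discover-then-validate shape (the helper name is illustrative):

#include <linux/usb.h>

/* Illustrative helper in the shape of the fix above: discover first,
 * then decide what is mandatory. */
static int find_int_endpoints(struct usb_host_interface *alt,
                              struct usb_endpoint_descriptor **in,
                              struct usb_endpoint_descriptor **out)
{
    int i;

    *in = *out = NULL;
    for (i = 0; i < alt->desc.bNumEndpoints; i++) {
        struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

        if (usb_endpoint_is_int_in(ep))
            *in = ep;
        else if (usb_endpoint_is_int_out(ep))
            *out = ep;
    }
    /* interrupt-in is required for every device; the caller decides
     * whether interrupt-out is also required (IOW56 only). */
    return *in ? 0 : -ENODEV;
}
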
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4e18600dc9b4..91f66d68bcb7 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -375,18 +375,24 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_get_property(np, "dynamic-power-switching", NULL))
hub->conf_data2 |= BIT(7);
- if (of_get_property(np, "oc-delay-100us", NULL)) {
- hub->conf_data2 &= ~BIT(5);
- hub->conf_data2 &= ~BIT(4);
- } else if (of_get_property(np, "oc-delay-4ms", NULL)) {
- hub->conf_data2 &= ~BIT(5);
- hub->conf_data2 |= BIT(4);
- } else if (of_get_property(np, "oc-delay-8ms", NULL)) {
- hub->conf_data2 |= BIT(5);
- hub->conf_data2 &= ~BIT(4);
- } else if (of_get_property(np, "oc-delay-16ms", NULL)) {
- hub->conf_data2 |= BIT(5);
- hub->conf_data2 |= BIT(4);
+ if (!of_property_read_u32(np, "oc-delay-us", property_u32)) {
+ if (*property_u32 == 100) {
+ /* 100 us */
+ hub->conf_data2 &= ~BIT(5);
+ hub->conf_data2 &= ~BIT(4);
+ } else if (*property_u32 == 4000) {
+ /* 4 ms */
+ hub->conf_data2 &= ~BIT(5);
+ hub->conf_data2 |= BIT(4);
+ } else if (*property_u32 == 16000) {
+ /* 16 ms */
+ hub->conf_data2 |= BIT(5);
+ hub->conf_data2 |= BIT(4);
+ } else {
+ /* 8 ms (DEFAULT) */
+ hub->conf_data2 |= BIT(5);
+ hub->conf_data2 &= ~BIT(4);
+ }
}
if (of_get_property(np, "compound-device", NULL))
@@ -432,30 +438,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
}
}
- hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
- if (!of_property_read_u32(np, "max-sp-power", property_u32))
- hub->max_power_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
- hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
- if (!of_property_read_u32(np, "max-bp-power", property_u32))
- hub->max_power_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
- hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
- if (!of_property_read_u32(np, "max-sp-current", property_u32))
- hub->max_current_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
- hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
- if (!of_property_read_u32(np, "max-bp-current", property_u32))
- hub->max_current_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
hub->power_on_time = USB251XB_DEF_POWER_ON_TIME;
- if (!of_property_read_u32(np, "power-on-time", property_u32))
- hub->power_on_time = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 255);
+ if (!of_property_read_u32(np, "power-on-time-ms", property_u32))
+ hub->power_on_time = min_t(u8, *property_u32 / 2, 255);
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
@@ -492,6 +477,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
/* The following parameters are currently not exposed to devicetree, but
* may be exposed as soon as they are needed.
*/
+ hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
+ hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
+ hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
+ hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
hub->boost_up = USB251XB_DEF_BOOST_UP;
hub->boost_x = USB251XB_DEF_BOOST_X;
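
Two details drive the usb251xb rewrite above: of_property_read_u32() returns 0 only when the property exists and parses, and it already stores the value in CPU endianness, which is why the be32_to_cpu() calls disappear. A hedged sketch of the read-with-default idiom (the helper is hypothetical, not a kernel API):

#include <linux/of.h>
#include <linux/types.h>

/* Hypothetical helper: read an optional u32 property, falling back to
 * a default. of_property_read_u32() returns 0 on success and already
 * stores a CPU-endian value, so no be32_to_cpu() is needed. */
static u32 read_u32_default(const struct device_node *np,
                            const char *name, u32 def)
{
    u32 val;

    if (of_property_read_u32(np, name, &val))
        return def;
    return val;
}

The oc-delay-us branch above is the same idiom spelled out: one numeric property selects among the four hardware delay encodings, with 8 ms as the fallback.
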
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index db68156568e6..b3b33cf7ddf6 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
};
MODULE_DEVICE_TABLE(i2c, isp1301_id);
+static const struct of_device_id isp1301_of_match[] = {
+ {.compatible = "nxp,isp1301" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, isp1301_of_match);
+
static struct i2c_client *isp1301_i2c_client;
static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
static struct i2c_driver isp1301_driver = {
.driver = {
.name = DRV_NAME,
+ .of_match_table = of_match_ptr(isp1301_of_match),
},
.probe = isp1301_probe,
.remove = isp1301_remove,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index ab78111e0968..6537d3ca2797 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1500,7 +1500,7 @@ static int digi_read_oob_callback(struct urb *urb)
return -1;
/* handle each oob command */
- for (i = 0; i < urb->actual_length - 4; i += 4) {
+ for (i = 0; i < urb->actual_length - 3; i += 4) {
opcode = buf[i];
line = buf[i + 1];
status = buf[i + 2];
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index ceaeebaa6f90..a76b95d32157 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb)
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
port_number, function, data[1]);
+
+ if (port_number >= edge_serial->serial->num_ports) {
+ dev_err(dev, "bad port number %d\n", port_number);
+ goto exit;
+ }
+
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
@@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb)
port_number = edge_port->port->port_number;
- if (edge_port->lsr_event) {
+ if (urb->actual_length > 0 && edge_port->lsr_event) {
edge_port->lsr_event = 0;
dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
__func__, port_number, edge_port->lsr_mask, *data);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index a180b17d2432..dd706953b466 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -31,7 +31,6 @@
#define BT_IGNITIONPRO_ID 0x2000
/* function prototypes */
-static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port);
static void omninet_process_read_urb(struct urb *urb);
static void omninet_write_bulk_callback(struct urb *urb);
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -60,7 +59,6 @@ static struct usb_serial_driver zyxel_omninet_device = {
.attach = omninet_attach,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
- .open = omninet_open,
.write = omninet_write,
.write_room = omninet_write_room,
.write_bulk_callback = omninet_write_bulk_callback,
@@ -140,17 +138,6 @@ static int omninet_port_remove(struct usb_serial_port *port)
return 0;
}
-static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
- struct usb_serial_port *wport;
-
- wport = serial->port[1];
- tty_port_tty_set(&wport->port, tty);
-
- return usb_serial_generic_open(tty, port);
-}
-
#define OMNINET_HEADERLEN 4
#define OMNINET_BULKOUTSIZE 64
#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 93c6c9b08daa..8a069aa154ed 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb)
if (!safe)
goto out;
+ if (length < 2) {
+ dev_err(&port->dev, "malformed packet\n");
+ return;
+ }
+
fcs = fcs_compute10(data, length, CRC10_INITFCS);
if (fcs) {
dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 16cc18369111..9129f6cb8230 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2071,6 +2071,20 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/*
+ * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de>
+ * The INIC-3619 bridge is used in the StarTech SLSODDU33B
+ * SATA-USB enclosure for slimline optical drives.
+ *
+ * The quirk enables MakeMKV to properly exchange keys with
+ * an installed BD drive.
+ */
+UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209,
+ "Initio Corporation",
+ "INIC-3619",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Reported by Qinglin Ye <yestyle@gmail.com> */
UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
"Kingston",
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index c77a0751a311..f3bf8f4e2d6c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
+#include <linux/refcount.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -86,7 +87,7 @@ struct grant_map {
int index;
int count;
int flags;
- atomic_t users;
+ refcount_t users;
struct unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
@@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->index = 0;
add->count = count;
- atomic_set(&add->users, 1);
+ refcount_set(&add->users, 1);
return add;
@@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
if (!map)
return;
- if (!atomic_dec_and_test(&map->users))
+ if (!refcount_dec_and_test(&map->users))
return;
atomic_sub(map->count, &pages_mapped);
@@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
struct grant_map *map = vma->vm_private_data;
pr_debug("gntdev_vma_open %p\n", vma);
- atomic_inc(&map->users);
+ refcount_inc(&map->users);
}
static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
}
- atomic_inc(&map->users);
+ refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
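
The gntdev hunks convert a pure object refcount from atomic_t to refcount_t: refcount_inc() saturates and warns on overflow or on incrementing from zero (a use-after-free signature), where atomic_inc() silently wraps. The conversion pattern, as a hedged sketch with illustrative names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
    refcount_t users;
    /* ... payload ... */
};

static struct obj *obj_alloc(void)
{
    struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

    if (o)
        refcount_set(&o->users, 1);  /* creator's reference */
    return o;
}

static void obj_get(struct obj *o)
{
    refcount_inc(&o->users);         /* WARNs if the count was 0 */
}

static void obj_put(struct obj *o)
{
    if (refcount_dec_and_test(&o->users))
        kfree(o);                    /* last reference gone */
}
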
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f8afc6dcc29f..e8cef1ad0fe3 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ * This function should be called with the pages from the current domain only;
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ if (__generic_dma_ops(dev)->mmap)
+ return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+ dma_addr, size, attrs);
+#endif
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+
+/*
+ * This function should be called with the pages from the current domain only;
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ if (__generic_dma_ops(dev)->get_sgtable) {
+#if 0
+ /*
+ * This check verifies that the page belongs to the current domain and
+ * is not one mapped from another domain.
+ * This check is for debugging only and should not be used in production builds.
+ */
+ unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
+ BUG_ON (!page_is_ram(bfn));
+#endif
+ return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
+ handle, size, attrs);
+ }
+#endif
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 4d343eed08f5..1f4733b80c87 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,7 +55,6 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
-#include <linux/init.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index b29447e03ede..25d404d22cae 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
{
struct afs_server *server;
struct afs_vnode *vnode, *xvnode;
- time_t now;
+ time64_t now;
long timeout;
int ret;
@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
_enter("");
- now = get_seconds();
+ now = ktime_get_real_seconds();
/* find the first vnode to update */
spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
/* and then reschedule */
_debug("reschedule");
- vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+ vnode->update_at = ktime_get_real_seconds() +
+ afs_vnode_update_timeout;
spin_lock(&server->cb_lock);
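
Throughout the afs changes, 32-bit time_t and get_seconds() give way to time64_t and ktime_get_real_seconds(), which stay correct past 2038 on 32-bit architectures. A hedged sketch of the expiry bookkeeping in the new style (struct and helper names are illustrative):

#include <linux/timekeeping.h>
#include <linux/types.h>

struct lease {
    time64_t expires_at;            /* cf. vnode->cb_expires */
};

static void lease_renew(struct lease *l, unsigned int ttl_secs)
{
    l->expires_at = ktime_get_real_seconds() + ttl_secs;
}

static bool lease_expiring_soon(const struct lease *l)
{
    /* the same "expires within 10 seconds" test as afs_validate() */
    return l->expires_at < ktime_get_real_seconds() + 10;
}
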
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 2edbdcbf6432..3062cceb5c2a 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
struct afs_callback *cb;
struct afs_server *server;
__be32 *bp;
- u32 tmp;
int ret, loop;
_enter("{%u}", call->unmarshall);
@@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (ret < 0)
return ret;
- tmp = ntohl(call->tmp);
- _debug("CB count: %u", tmp);
- if (tmp != call->count && tmp != 0)
+ call->count2 = ntohl(call->tmp);
+ _debug("CB count: %u", call->count2);
+ if (call->count2 != call->count && call->count2 != 0)
return -EBADMSG;
call->offset = 0;
call->unmarshall++;
@@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
case 4:
_debug("extract CB array");
ret = afs_extract_data(call, call->buffer,
- call->count * 3 * 4, false);
+ call->count2 * 3 * 4, false);
if (ret < 0)
return ret;
_debug("unmarshall CB array");
cb = call->request;
bp = call->buffer;
- for (loop = call->count; loop > 0; loop--, cb++) {
+ for (loop = call->count2; loop > 0; loop--, cb++) {
cb->version = ntohl(*bp++);
cb->expiry = ntohl(*bp++);
cb->type = ntohl(*bp++);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index ba7b71fba34b..0d5b8508869b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
const struct file_operations afs_file_operations = {
.open = afs_open,
+ .flush = afs_flush,
.release = afs_release,
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
@@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page)
if (!req)
goto enomem;
+ /* We request a full page. If the page is a partial one at the
+ * end of the file, the server will return a short read and the
+ * unmarshalling code will clear the unfilled space.
+ */
atomic_set(&req->usage, 1);
req->pos = (loff_t)page->index << PAGE_SHIFT;
- req->len = min_t(size_t, i_size_read(inode) - req->pos,
- PAGE_SIZE);
+ req->len = PAGE_SIZE;
req->nr_pages = 1;
req->pages[0] = page;
get_page(page);
@@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page)
fscache_uncache_page(vnode->cache, page);
#endif
BUG_ON(PageFsCache(page));
- goto error;
+
+ if (ret == -EINTR ||
+ ret == -ENOMEM ||
+ ret == -ERESTARTSYS ||
+ ret == -EAGAIN)
+ goto error;
+ goto io_error;
}
SetPageUptodate(page);
@@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page)
_leave(" = 0");
return 0;
+io_error:
+ SetPageError(page);
+ goto error;
enomem:
ret = -ENOMEM;
error:
- SetPageError(page);
unlock_page(page);
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index ac8e766978dc..19f76ae36982 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -17,6 +17,12 @@
#include "afs_fs.h"
/*
+ * We need somewhere to discard into in case the server helpfully returns more
+ * than we asked for in FS.FetchData{,64}.
+ */
+static u8 afs_discard_buffer[64];
+
+/*
* decode an AFSFid block
*/
static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid)
@@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
vnode->vfs_inode.i_mode = mode;
}
- vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
+ vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_version = data_version;
@@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
vnode->cb_version = ntohl(*bp++);
vnode->cb_expiry = ntohl(*bp++);
vnode->cb_type = ntohl(*bp++);
- vnode->cb_expires = vnode->cb_expiry + get_seconds();
+ vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds();
*_bp = bp;
}
@@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
void *buffer;
int ret;
- _enter("{%u,%zu/%u;%u/%llu}",
+ _enter("{%u,%zu/%u;%llu/%llu}",
call->unmarshall, call->offset, call->count,
req->remain, req->actual_len);
@@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
req->actual_len |= ntohl(call->tmp);
_debug("DATA length: %llu", req->actual_len);
- /* Check that the server didn't want to send us extra. We
- * might want to just discard instead, but that requires
- * cooperation from AF_RXRPC.
- */
- if (req->actual_len > req->len)
- return -EBADMSG;
req->remain = req->actual_len;
call->offset = req->pos & (PAGE_SIZE - 1);
@@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->unmarshall++;
begin_page:
+ ASSERTCMP(req->index, <, req->nr_pages);
if (req->remain > PAGE_SIZE - call->offset)
size = PAGE_SIZE - call->offset;
else
@@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
/* extract the returned data */
case 3:
- _debug("extract data %u/%llu %zu/%u",
+ _debug("extract data %llu/%llu %zu/%u",
req->remain, req->actual_len, call->offset, call->count);
buffer = kmap(req->pages[req->index]);
@@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
if (call->offset == PAGE_SIZE) {
if (req->page_done)
req->page_done(call, req);
+ req->index++;
if (req->remain > 0) {
- req->index++;
call->offset = 0;
+ if (req->index >= req->nr_pages) {
+ call->unmarshall = 4;
+ goto begin_discard;
+ }
goto begin_page;
}
}
+ goto no_more_data;
+
+ /* Discard any excess data the server gave us */
+ begin_discard:
+ case 4:
+ size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
+ call->count = size;
+ _debug("extract discard %llu/%llu %zu/%u",
+ req->remain, req->actual_len, call->offset, call->count);
+
+ call->offset = 0;
+ ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
+ req->remain -= call->offset;
+ if (ret < 0)
+ return ret;
+ if (req->remain > 0)
+ goto begin_discard;
no_more_data:
call->offset = 0;
- call->unmarshall++;
+ call->unmarshall = 5;
/* extract the metadata */
- case 4:
+ case 5:
ret = afs_extract_data(call, call->buffer,
(21 + 3 + 6) * 4, false);
if (ret < 0)
@@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->offset = 0;
call->unmarshall++;
- case 5:
+ case 6:
break;
}
- if (call->count < PAGE_SIZE) {
- buffer = kmap(req->pages[req->index]);
- memset(buffer + call->count, 0, PAGE_SIZE - call->count);
- kunmap(req->pages[req->index]);
+ for (; req->index < req->nr_pages; req->index++) {
+ if (call->count < PAGE_SIZE)
+ zero_user_segment(req->pages[req->index],
+ call->count, PAGE_SIZE);
if (req->page_done)
req->page_done(call, req);
+ call->count = 0;
}
_leave(" = 0 [done]");
@@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server,
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
- *bp++ = htonl(AFS_SET_MODE);
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server,
memset(bp, 0, c_padsz);
bp = (void *) bp + c_padsz;
}
- *bp++ = htonl(AFS_SET_MODE);
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server,
*bp++ = htonl(vnode->fid.vnode);
*bp++ = htonl(vnode->fid.unique);
- *bp++ = 0; /* mask */
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MTIME); /* mask */
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
@@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
_enter(",%x,{%x:%u},,",
key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
- size = to - offset;
+ size = (loff_t)to - (loff_t)offset;
if (first != last)
size += (loff_t)(last - first) << PAGE_SHIFT;
pos = (loff_t)first << PAGE_SHIFT;
@@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
*bp++ = htonl(vnode->fid.vnode);
*bp++ = htonl(vnode->fid.unique);
- *bp++ = 0; /* mask */
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MTIME); /* mask */
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
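
Rather than rejecting an over-long FS.FetchData reply with -EBADMSG, the new unmarshalling state 4 above drains the surplus through a 64-byte static scratch buffer, one chunk per pass. A runnable userspace sketch of that drain loop (the plain memcpy() and the names stand in for afs_extract_data()):

#include <stdio.h>
#include <string.h>

static unsigned char discard_buffer[64];  /* cf. afs_discard_buffer */

/* Consume 'remain' unwanted bytes from src in discard-sized chunks,
 * mirroring the case-4 begin_discard loop above. */
static void drain_excess(const unsigned char *src, size_t remain)
{
    while (remain > 0) {
        size_t n = remain < sizeof(discard_buffer)
                 ? remain : sizeof(discard_buffer);

        memcpy(discard_buffer, src, n);   /* extracted, then dropped */
        src += n;
        remain -= n;
    }
}

int main(void)
{
    unsigned char reply[200] = { 0 };

    /* We asked for 100 bytes but the server sent 200. */
    drain_excess(reply + 100, sizeof(reply) - 100);
    puts("excess discarded");
    return 0;
}
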
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 1e4897a048d2..aae55dd15108 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
inode->i_fop = &afs_dir_file_operations;
break;
case AFS_FTYPE_SYMLINK:
- inode->i_mode = S_IFLNK | vnode->status.mode;
- inode->i_op = &page_symlink_inode_operations;
+ /* Symlinks with a mode of 0644 are actually mountpoints. */
+ if ((vnode->status.mode & 0777) == 0644) {
+ inode->i_flags |= S_AUTOMOUNT;
+
+ spin_lock(&vnode->lock);
+ set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+ spin_unlock(&vnode->lock);
+
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_mntpt_inode_operations;
+ inode->i_fop = &afs_mntpt_file_operations;
+ } else {
+ inode->i_mode = S_IFLNK | vnode->status.mode;
+ inode->i_op = &page_symlink_inode_operations;
+ }
inode_nohighmem(inode);
break;
default:
@@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
set_nlink(inode, vnode->status.nlink);
inode->i_uid = vnode->status.owner;
- inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_gid = vnode->status.group;
inode->i_size = vnode->status.size;
- inode->i_ctime.tv_sec = vnode->status.mtime_server;
+ inode->i_ctime.tv_sec = vnode->status.mtime_client;
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
inode->i_generation = vnode->fid.unique;
inode->i_version = vnode->status.data_version;
inode->i_mapping->a_ops = &afs_fs_aops;
-
- /* check to see whether a symbolic link is really a mountpoint */
- if (vnode->status.type == AFS_FTYPE_SYMLINK) {
- afs_mntpt_check_symlink(vnode, key);
-
- if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
- inode->i_mode = S_IFDIR | vnode->status.mode;
- inode->i_op = &afs_mntpt_inode_operations;
- inode->i_fop = &afs_mntpt_file_operations;
- }
- }
-
return 0;
}
@@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
vnode->cb_version = 0;
vnode->cb_expiry = 0;
vnode->cb_type = 0;
- vnode->cb_expires = get_seconds();
+ vnode->cb_expires = ktime_get_real_seconds();
} else {
vnode->cb_version = cb->version;
vnode->cb_expiry = cb->expiry;
vnode->cb_type = cb->type;
- vnode->cb_expires = vnode->cb_expiry + get_seconds();
+ vnode->cb_expires = vnode->cb_expiry +
+ ktime_get_real_seconds();
}
}
@@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
!test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
- if (vnode->cb_expires < get_seconds() + 10) {
+ if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
_debug("callback expired");
set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
} else {
@@ -444,7 +446,7 @@ void afs_evict_inode(struct inode *inode)
mutex_lock(&vnode->permits_lock);
permits = vnode->permits;
- rcu_assign_pointer(vnode->permits, NULL);
+ RCU_INIT_POINTER(vnode->permits, NULL);
mutex_unlock(&vnode->permits_lock);
if (permits)
call_rcu(&permits->rcu, afs_zap_permits);
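
The one-line change above swaps rcu_assign_pointer() for RCU_INIT_POINTER() when storing NULL: the former carries a write barrier that orders the pointee's initialization before publication, and a NULL store publishes nothing to order. A hedged sketch of when each form is appropriate (types and names are illustrative):

#include <linux/rcupdate.h>

struct permits { int count; };

struct owner {
    struct permits __rcu *permits;
};

static void publish_permits(struct owner *o, struct permits *p)
{
    /* Real object: the barrier orders p's initialization before
     * the pointer becomes visible to RCU readers. */
    rcu_assign_pointer(o->permits, p);
}

static struct permits *take_permits(struct owner *o)
{
    struct permits *old =
        rcu_dereference_protected(o->permits, 1);

    /* NULL publishes nothing, so the cheaper initializer suffices. */
    RCU_INIT_POINTER(o->permits, NULL);
    return old;   /* free after a grace period, e.g. via call_rcu() */
}
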
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5dfa56903a2d..a6901360fb81 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/rxrpc.h>
@@ -90,7 +91,10 @@ struct afs_call {
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
unsigned first_offset; /* offset into mapping[first] */
- unsigned last_to; /* amount of mapping[last] */
+ union {
+ unsigned last_to; /* amount of mapping[last] */
+ unsigned count2; /* count used in unmarshalling */
+ };
unsigned char unmarshall; /* unmarshalling phase */
bool incoming; /* T if incoming call */
bool send_pages; /* T if data from mapping should be sent */
@@ -127,12 +131,11 @@ struct afs_call_type {
*/
struct afs_read {
loff_t pos; /* Where to start reading */
- loff_t len; /* How much to read */
+ loff_t len; /* How much we're asking for */
loff_t actual_len; /* How much we're actually getting */
+ loff_t remain; /* Amount remaining */
atomic_t usage;
- unsigned int remain; /* Amount remaining */
unsigned int index; /* Which page we're reading into */
- unsigned int pg_offset; /* Offset in page we're at */
unsigned int nr_pages;
void (*page_done)(struct afs_call *, struct afs_read *);
struct page *pages[];
@@ -247,7 +250,7 @@ struct afs_cache_vhash {
*/
struct afs_vlocation {
atomic_t usage;
- time_t time_of_death; /* time at which put reduced usage to 0 */
+ time64_t time_of_death; /* time at which put reduced usage to 0 */
struct list_head link; /* link in cell volume location list */
struct list_head grave; /* link in master graveyard list */
struct list_head update; /* link in master update list */
@@ -258,7 +261,7 @@ struct afs_vlocation {
struct afs_cache_vlocation vldb; /* volume information DB record */
struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
wait_queue_head_t waitq; /* status change waitqueue */
- time_t update_at; /* time at which record should be updated */
+ time64_t update_at; /* time at which record should be updated */
spinlock_t lock; /* access lock */
afs_vlocation_state_t state; /* volume location state */
unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
@@ -271,7 +274,7 @@ struct afs_vlocation {
*/
struct afs_server {
atomic_t usage;
- time_t time_of_death; /* time at which put reduced usage to 0 */
+ time64_t time_of_death; /* time at which put reduced usage to 0 */
struct in_addr addr; /* server address */
struct afs_cell *cell; /* cell in which server resides */
struct list_head link; /* link in cell's server list */
@@ -374,8 +377,8 @@ struct afs_vnode {
struct rb_node server_rb; /* link in server->fs_vnodes */
struct rb_node cb_promise; /* link in server->cb_promises */
struct work_struct cb_broken_work; /* work to be done on callback break */
- time_t cb_expires; /* time at which callback expires */
- time_t cb_expires_at; /* time used to order cb_promise */
+ time64_t cb_expires; /* time at which callback expires */
+ time64_t cb_expires_at; /* time used to order cb_promise */
unsigned cb_version; /* callback version */
unsigned cb_expiry; /* callback expiry time */
afs_callback_type_t cb_type; /* type of callback */
@@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations;
extern const struct file_operations afs_mntpt_file_operations;
extern struct vfsmount *afs_d_automount(struct path *);
-extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
extern void afs_mntpt_kill_timer(void);
/*
@@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_writeback_all(struct afs_vnode *);
+extern int afs_flush(struct file *, fl_owner_t);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 91ea1aa0d8b3..100b207efc9e 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
case RXKADDATALEN: return -EKEYREJECTED;
case RXKADILLEGALLEVEL: return -EKEYREJECTED;
+ case RXGEN_OPCODE: return -ENOTSUPP;
+
default: return -EREMOTEIO;
}
}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index d4fb0afc0097..bd3b65cde282 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -47,59 +47,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
/*
- * check a symbolic link to see whether it actually encodes a mountpoint
- * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
- */
-int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
-{
- struct page *page;
- size_t size;
- char *buf;
- int ret;
-
- _enter("{%x:%u,%u}",
- vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
-
- /* read the contents of the symlink into the pagecache */
- page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
- afs_page_filler, key);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
-
- ret = -EIO;
- if (PageError(page))
- goto out_free;
-
- buf = kmap(page);
-
- /* examine the symlink's contents */
- size = vnode->status.size;
- _debug("symlink to %*.*s", (int) size, (int) size, buf);
-
- if (size > 2 &&
- (buf[0] == '%' || buf[0] == '#') &&
- buf[size - 1] == '.'
- ) {
- _debug("symlink is a mountpoint");
- spin_lock(&vnode->lock);
- set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
- spin_unlock(&vnode->lock);
- }
-
- ret = 0;
-
- kunmap(page);
-out_free:
- put_page(page);
-out:
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* no valid lookup procedure on this sort of dir
*/
static struct dentry *afs_mntpt_lookup(struct inode *dir,
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 419ef05dcb5e..8f76b13d5549 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -259,67 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call)
call->buffer = NULL;
}
+#define AFS_BVEC_MAX 8
+
+/*
+ * Load the given bvec with the next few pages.
+ */
+static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
+ struct bio_vec *bv, pgoff_t first, pgoff_t last,
+ unsigned offset)
+{
+ struct page *pages[AFS_BVEC_MAX];
+ unsigned int nr, n, i, to, bytes = 0;
+
+ nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
+ n = find_get_pages_contig(call->mapping, first, nr, pages);
+ ASSERTCMP(n, ==, nr);
+
+ msg->msg_flags |= MSG_MORE;
+ for (i = 0; i < nr; i++) {
+ to = PAGE_SIZE;
+ if (first + i >= last) {
+ to = call->last_to;
+ msg->msg_flags &= ~MSG_MORE;
+ }
+ bv[i].bv_page = pages[i];
+ bv[i].bv_len = to - offset;
+ bv[i].bv_offset = offset;
+ bytes += to - offset;
+ offset = 0;
+ }
+
+ iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+}
+
/*
* attach the data from a bunch of pages on an inode to a call
*/
static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
{
- struct page *pages[8];
- unsigned count, n, loop, offset, to;
+ struct bio_vec bv[AFS_BVEC_MAX];
+ unsigned int bytes, nr, loop, offset;
pgoff_t first = call->first, last = call->last;
int ret;
- _enter("");
-
offset = call->first_offset;
call->first_offset = 0;
do {
- _debug("attach %lx-%lx", first, last);
-
- count = last - first + 1;
- if (count > ARRAY_SIZE(pages))
- count = ARRAY_SIZE(pages);
- n = find_get_pages_contig(call->mapping, first, count, pages);
- ASSERTCMP(n, ==, count);
-
- loop = 0;
- do {
- struct bio_vec bvec = {.bv_page = pages[loop],
- .bv_offset = offset};
- msg->msg_flags = 0;
- to = PAGE_SIZE;
- if (first + loop >= last)
- to = call->last_to;
- else
- msg->msg_flags = MSG_MORE;
- bvec.bv_len = to - offset;
- offset = 0;
-
- _debug("- range %u-%u%s",
- offset, to, msg->msg_flags ? " [more]" : "");
- iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
- &bvec, 1, to - offset);
-
- /* have to change the state *before* sending the last
- * packet as RxRPC might give us the reply before it
- * returns from sending the request */
- if (first + loop >= last)
- call->state = AFS_CALL_AWAIT_REPLY;
- ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
- msg, to - offset);
- if (ret < 0)
- break;
- } while (++loop < count);
- first += count;
-
- for (loop = 0; loop < count; loop++)
- put_page(pages[loop]);
+ afs_load_bvec(call, msg, bv, first, last, offset);
+ offset = 0;
+ bytes = msg->msg_iter.count;
+ nr = msg->msg_iter.nr_segs;
+
+ /* Have to change the state *before* sending the last
+ * packet as RxRPC might give us the reply before it
+ * returns from sending the request.
+ */
+ if (first + nr - 1 >= last)
+ call->state = AFS_CALL_AWAIT_REPLY;
+ ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
+ msg, bytes);
+ for (loop = 0; loop < nr; loop++)
+ put_page(bv[loop].bv_page);
if (ret < 0)
break;
+
+ first += nr;
} while (first <= last);
- _leave(" = %d", ret);
return ret;
}
@@ -333,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
struct rxrpc_call *rxcall;
struct msghdr msg;
struct kvec iov[1];
+ size_t offset;
+ u32 abort_code;
int ret;
_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -381,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
msg.msg_controllen = 0;
msg.msg_flags = (call->send_pages ? MSG_MORE : 0);
- /* have to change the state *before* sending the last packet as RxRPC
- * might give us the reply before it returns from sending the
- * request */
+ /* We have to change the state *before* sending the last packet as
+ * rxrpc might give us the reply before it returns from sending the
+ * request. Further, if the send fails, we may already have been given
+ * a notification and may have collected it.
+ */
if (!call->send_pages)
call->state = AFS_CALL_AWAIT_REPLY;
ret = rxrpc_kernel_send_data(afs_socket, rxcall,
@@ -405,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
return afs_wait_for_call_to_complete(call);
error_do_abort:
- rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
+ call->state = AFS_CALL_COMPLETE;
+ if (ret != -ECONNABORTED) {
+ rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
+ -ret, "KSD");
+ } else {
+ abort_code = 0;
+ offset = 0;
+ rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
+ false, &abort_code);
+ ret = call->type->abort_to_error(abort_code);
+ }
error_kill_call:
afs_put_call(call);
_leave(" = %d", ret);
@@ -452,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call)
case -EINPROGRESS:
case -EAGAIN:
goto out;
+ case -ECONNABORTED:
+ goto call_complete;
case -ENOTCONN:
abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, -ret, "KNC");
- goto do_abort;
+ goto save_error;
case -ENOTSUPP:
- abort_code = RX_INVALID_OPERATION;
+ abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, -ret, "KIV");
- goto do_abort;
+ goto save_error;
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
@@ -471,7 +494,7 @@ static void afs_deliver_to_call(struct afs_call *call)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, EBADMSG, "KUM");
- goto do_abort;
+ goto save_error;
}
}
@@ -482,8 +505,9 @@ out:
_leave("");
return;
-do_abort:
+save_error:
call->error = ret;
+call_complete:
call->state = AFS_CALL_COMPLETE;
goto done;
}
@@ -493,7 +517,6 @@ do_abort:
*/
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
- const char *abort_why;
int ret;
DECLARE_WAITQUEUE(myself, current);
@@ -512,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
continue;
}
- abort_why = "KWC";
- ret = call->error;
- if (call->state == AFS_CALL_COMPLETE)
- break;
- abort_why = "KWI";
- ret = -EINTR;
- if (signal_pending(current))
+ if (call->state == AFS_CALL_COMPLETE ||
+ signal_pending(current))
break;
schedule();
}
@@ -526,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
remove_wait_queue(&call->waitq, &myself);
__set_current_state(TASK_RUNNING);
- /* kill the call */
+ /* Kill off the call if it's still live. */
if (call->state < AFS_CALL_COMPLETE) {
- _debug("call incomplete");
+ _debug("call interrupted");
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- RX_CALL_DEAD, -ret, abort_why);
+ RX_USER_ABORT, -EINTR, "KWI");
}
+ ret = call->error;
_debug("call complete");
afs_put_call(call);
_leave(" = %d", ret);
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 8d010422dc89..ecb86a670180 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode)
mutex_lock(&vnode->permits_lock);
permits = vnode->permits;
- rcu_assign_pointer(vnode->permits, NULL);
+ RCU_INIT_POINTER(vnode->permits, NULL);
mutex_unlock(&vnode->permits_lock);
if (permits)
@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
} else {
if (!(access & AFS_ACE_LOOKUP))
goto permission_denied;
+ if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+ goto permission_denied;
if (mask & (MAY_EXEC | MAY_READ)) {
if (!(access & AFS_ACE_READ))
goto permission_denied;
+ if (!(inode->i_mode & S_IRUSR))
+ goto permission_denied;
} else if (mask & MAY_WRITE) {
if (!(access & AFS_ACE_WRITE))
goto permission_denied;
+ if (!(inode->i_mode & S_IWUSR))
+ goto permission_denied;
}
}
key_put(key);
- ret = generic_permission(inode, mask);
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d4066ab7dd55..c001b1f2455f 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
spin_lock(&afs_server_graveyard_lock);
if (atomic_read(&server->usage) == 0) {
list_move_tail(&server->grave, &afs_server_graveyard);
- server->time_of_death = get_seconds();
+ server->time_of_death = ktime_get_real_seconds();
queue_delayed_work(afs_wq, &afs_server_reaper,
afs_server_timeout * HZ);
}
@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
LIST_HEAD(corpses);
struct afs_server *server;
unsigned long delay, expiry;
- time_t now;
+ time64_t now;
- now = get_seconds();
+ now = ktime_get_real_seconds();
spin_lock(&afs_server_graveyard_lock);
while (!list_empty(&afs_server_graveyard)) {
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index d7d8dd8c0b31..37b7c3b342a6 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
struct afs_vlocation *xvl;
/* wait at least 10 minutes before updating... */
- vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+ vl->update_at = ktime_get_real_seconds() +
+ afs_vlocation_update_timeout;
spin_lock(&afs_vlocation_updates_lock);
@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
if (atomic_read(&vl->usage) == 0) {
_debug("buried");
list_move_tail(&vl->grave, &afs_vlocation_graveyard);
- vl->time_of_death = get_seconds();
+ vl->time_of_death = ktime_get_real_seconds();
queue_delayed_work(afs_wq, &afs_vlocation_reap,
afs_vlocation_timeout * HZ);
@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
LIST_HEAD(corpses);
struct afs_vlocation *vl;
unsigned long delay, expiry;
- time_t now;
+ time64_t now;
_enter("");
- now = get_seconds();
+ now = ktime_get_real_seconds();
spin_lock(&afs_vlocation_graveyard_lock);
while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
{
struct afs_cache_vlocation vldb;
struct afs_vlocation *vl, *xvl;
- time_t now;
+ time64_t now;
long timeout;
int ret;
_enter("");
- now = get_seconds();
+ now = ktime_get_real_seconds();
/* find a record to update */
spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
/* and then reschedule */
_debug("reschedule");
- vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+ vl->update_at = ktime_get_real_seconds() +
+ afs_vlocation_update_timeout;
spin_lock(&afs_vlocation_updates_lock);
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c83c1a0e851f..2d2fccd5044b 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb)
* partly or wholly fill a page that's under preparation for writing
*/
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
- loff_t pos, struct page *page)
+ loff_t pos, unsigned int len, struct page *page)
{
struct afs_read *req;
- loff_t i_size;
int ret;
_enter(",,%llu", (unsigned long long)pos);
@@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
atomic_set(&req->usage, 1);
req->pos = pos;
+ req->len = len;
req->nr_pages = 1;
req->pages[0] = page;
-
- i_size = i_size_read(&vnode->vfs_inode);
- if (pos + PAGE_SIZE > i_size)
- req->len = i_size - pos;
- else
- req->len = PAGE_SIZE;
+ get_page(page);
ret = afs_vnode_fetch_data(vnode, key, req);
afs_put_read(req);
@@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
kfree(candidate);
return -ENOMEM;
}
- *pagep = page;
- /* page won't leak in error case: it eventually gets cleaned off LRU */
if (!PageUptodate(page) && len != PAGE_SIZE) {
- ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
+ ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
kfree(candidate);
_leave(" = %d [prep]", ret);
return ret;
@@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
SetPageUptodate(page);
}
+ /* page won't leak in error case: it eventually gets cleaned off LRU */
+ *pagep = page;
+
try_again:
spin_lock(&vnode->writeback_lock);
@@ -233,7 +231,7 @@ flush_conflicting_wb:
if (wb->state == AFS_WBACK_PENDING)
wb->state = AFS_WBACK_CONFLICTING;
spin_unlock(&vnode->writeback_lock);
- if (PageDirty(page)) {
+ if (clear_page_dirty_for_io(page)) {
ret = afs_write_back_from_locked_page(wb, page);
if (ret < 0) {
afs_put_writeback(candidate);
@@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct key *key = file->private_data;
loff_t i_size, maybe_i_size;
+ int ret;
_enter("{%x:%u},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping,
spin_unlock(&vnode->writeback_lock);
}
+ if (!PageUptodate(page)) {
+ if (copied < len) {
+ /* Try and load any missing data from the server. The
+ * unmarshalling routine will take care of clearing any
+ * bits that are beyond the EOF.
+ */
+ ret = afs_fill_page(vnode, key, pos + copied,
+ len - copied, page);
+ if (ret < 0)
+ return ret;
+ }
+ SetPageUptodate(page);
+ }
+
set_page_dirty(page);
if (PageDirty(page))
_debug("dirtied");
@@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
ASSERTCMP(pv.nr, ==, count);
for (loop = 0; loop < count; loop++) {
- ClearPageUptodate(pv.pages[loop]);
+ struct page *page = pv.pages[loop];
+ ClearPageUptodate(page);
if (error)
- SetPageError(pv.pages[loop]);
- end_page_writeback(pv.pages[loop]);
+ SetPageError(page);
+ if (PageWriteback(page))
+ end_page_writeback(page);
+ if (page->index >= first)
+ first = page->index + 1;
}
__pagevec_release(&pv);
@@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
_enter(",%lx", primary_page->index);
count = 1;
- if (!clear_page_dirty_for_io(primary_page))
- BUG();
if (test_set_page_writeback(primary_page))
BUG();
@@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping,
*/
lock_page(page);
- if (page->mapping != mapping) {
+ if (page->mapping != mapping || !PageDirty(page)) {
unlock_page(page);
put_page(page);
continue;
}
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
-
- if (PageWriteback(page) || !PageDirty(page)) {
+ if (PageWriteback(page)) {
unlock_page(page);
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+ put_page(page);
continue;
}
@@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping,
wb->state = AFS_WBACK_WRITING;
spin_unlock(&wb->vnode->writeback_lock);
+ if (!clear_page_dirty_for_io(page))
+ BUG();
ret = afs_write_back_from_locked_page(wb, page);
unlock_page(page);
put_page(page);
@@ -746,6 +764,20 @@ out:
}
/*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+ _enter("");
+
+ if ((file->f_mode & FMODE_WRITE) == 0)
+ return 0;
+
+ return vfs_fsync(file, 0);
+}
+
+/*
* notification that a previously read-only page is about to become writable
* - if it returns an error, the caller will deliver a bus error signal
*/
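
/*
 * A theme of the write.c changes above: a page must be claimed with
 * clear_page_dirty_for_io() before write-back starts on it, not merely
 * tested with PageDirty(), so the dirty state cannot be lost or
 * written back twice.  Sketch of the calling shape, with writepage_fn()
 * standing in for the fs-specific write-back routine:
 */
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

static int write_locked_page(struct page *page,
			     int (*writepage_fn)(struct page *))
{
	/* Atomically take ownership of the dirty state... */
	if (!clear_page_dirty_for_io(page))
		return 0;		/* someone else cleaned it */
	/* ...then flag the page under write-back and do the I/O. */
	if (test_set_page_writeback(page))
		BUG();
	return writepage_fn(page);
}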
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 7d398d300e97..9382db998ec9 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -743,7 +743,7 @@ static int tcp_accept_from_sock(struct connection *con)
newsock->type = con->sock->type;
newsock->ops = con->sock->ops;
- result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
+ result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true);
if (result < 0)
goto accept_err;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 338d2f73eb29..a2c05f2ada6d 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1359,6 +1359,16 @@ out:
return 0;
}
+static void fat_dummy_inode_init(struct inode *inode)
+{
+ /* Initialize this dummy inode to work as a no-op. */
+ MSDOS_I(inode)->mmu_private = 0;
+ MSDOS_I(inode)->i_start = 0;
+ MSDOS_I(inode)->i_logstart = 0;
+ MSDOS_I(inode)->i_attrs = 0;
+ MSDOS_I(inode)->i_pos = 0;
+}
+
static int fat_read_root(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
fat_inode = new_inode(sb);
if (!fat_inode)
goto out_fail;
- MSDOS_I(fat_inode)->i_pos = 0;
+ fat_dummy_inode_init(fat_inode);
sbi->fat_inode = fat_inode;
fsinfo_inode = new_inode(sb);
if (!fsinfo_inode)
goto out_fail;
+ fat_dummy_inode_init(fsinfo_inode);
fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
sbi->fsinfo_inode = fsinfo_inode;
insert_inode_hash(fsinfo_inode);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ef600591d96f..63ee2940775c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
spin_unlock_bh(&wb->work_lock);
}
+static void finish_writeback_work(struct bdi_writeback *wb,
+ struct wb_writeback_work *work)
+{
+ struct wb_completion *done = work->done;
+
+ if (work->auto_free)
+ kfree(work);
+ if (done && atomic_dec_and_test(&done->cnt))
+ wake_up_all(&wb->bdi->wb_waitq);
+}
+
static void wb_queue_work(struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
trace_writeback_queue(wb, work);
- spin_lock_bh(&wb->work_lock);
- if (!test_bit(WB_registered, &wb->state))
- goto out_unlock;
if (work->done)
atomic_inc(&work->done->cnt);
- list_add_tail(&work->list, &wb->work_list);
- mod_delayed_work(bdi_wq, &wb->dwork, 0);
-out_unlock:
+
+ spin_lock_bh(&wb->work_lock);
+
+ if (test_bit(WB_registered, &wb->state)) {
+ list_add_tail(&work->list, &wb->work_list);
+ mod_delayed_work(bdi_wq, &wb->dwork, 0);
+ } else
+ finish_writeback_work(wb, work);
+
spin_unlock_bh(&wb->work_lock);
}
@@ -1873,16 +1887,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
set_bit(WB_writeback_running, &wb->state);
while ((work = get_next_work_item(wb)) != NULL) {
- struct wb_completion *done = work->done;
-
trace_writeback_exec(wb, work);
-
wrote += wb_writeback(wb, work);
-
- if (work->auto_free)
- kfree(work);
- if (done && atomic_dec_and_test(&done->cnt))
- wake_up_all(&wb->bdi->wb_waitq);
+ finish_writeback_work(wb, work);
}
/*
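
/*
 * The fs-writeback.c change funnels the "executed" and "bdi already
 * unregistered" paths through one finish_writeback_work() helper, so a
 * queued work item is always freed and its completion count always
 * dropped.  The refcounted-completion idiom, with generic names:
 */
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct batch_done {
	atomic_t cnt;			/* one ref per outstanding work */
	wait_queue_head_t *waitq;
};

static void finish_work_item(struct batch_done *done, void *work,
			     bool auto_free)
{
	if (auto_free)
		kfree(work);
	/* The last item to finish wakes everyone waiting on the batch. */
	if (done && atomic_dec_and_test(&done->cnt))
		wake_up_all(done->waitq);
}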
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index c45084ac642d..511e1ed7e2de 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@ struct lm_lockname {
struct gfs2_sbd *ln_sbd;
u64 ln_number;
unsigned int ln_type;
-};
+} __packed __aligned(sizeof(int));
#define lm_name_equal(name1, name2) \
(((name1)->ln_number == (name2)->ln_number) && \
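
/*
 * The gfs2 hunk above packs struct lm_lockname because lock names are
 * hashed and compared as raw bytes: compiler-inserted padding would let
 * two equal names differ in memory.  The general pattern for any struct
 * used as a byte-wise key:
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct byte_key {
	u64 number;
	u32 type;
} __packed __aligned(sizeof(int));	/* no padding bytes to hash */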
diff --git a/fs/iomap.c b/fs/iomap.c
index 3ca1a8e44135..141c3cd55a8b 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -846,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = file_inode(iocb->ki_filp);
size_t count = iov_iter_count(iter);
- loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
+ loff_t pos = iocb->ki_pos, start = pos;
+ loff_t end = iocb->ki_pos + count - 1, ret = 0;
unsigned int flags = IOMAP_DIRECT;
struct blk_plug plug;
struct iomap_dio *dio;
@@ -887,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
if (mapping->nrpages) {
- ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+ ret = filemap_write_and_wait_range(mapping, start, end);
if (ret)
goto out_free_dio;
ret = invalidate_inode_pages2_range(mapping,
- iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT);
WARN_ON_ONCE(ret);
ret = 0;
}
@@ -941,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
__set_current_state(TASK_RUNNING);
}
+ ret = iomap_dio_complete(dio);
+
/*
* Try again to invalidate clean pages which might have been cached by
* non-direct readahead, or faulted in by get_user_pages() if the source
@@ -949,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
* this invalidation fails, tough, the write still worked...
*/
if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
- ret = invalidate_inode_pages2_range(mapping,
- iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
- WARN_ON_ONCE(ret);
+ int err = invalidate_inode_pages2_range(mapping,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ WARN_ON_ONCE(err);
}
- return iomap_dio_complete(dio);
+ return ret;
out_free_dio:
kfree(dio);
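
/*
 * The iomap.c hunk reorders direct-I/O completion: the return value of
 * iomap_dio_complete() is captured first, and the follow-up page-cache
 * invalidation becomes best-effort, reported only via WARN_ON_ONCE()
 * rather than overwriting the I/O result.  Shape of the fix, with a
 * stand-in function name:
 */
#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/mm.h>

static ssize_t dio_finish(struct address_space *mapping, loff_t start,
			  loff_t end, ssize_t ret)
{
	/* A failed invalidation must not mask a successful write. */
	int err = invalidate_inode_pages2_range(mapping, start >> PAGE_SHIFT,
						end >> PAGE_SHIFT);
	WARN_ON_ONCE(err);
	return ret;
}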
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index bb79972dc638..773774531aff 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -232,12 +232,12 @@ static struct svc_serv_ops nfs41_cb_sv_ops = {
.svo_module = THIS_MODULE,
};
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
[0] = &nfs40_cb_sv_ops,
[1] = &nfs41_cb_sv_ops,
};
#else
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
[0] = &nfs40_cb_sv_ops,
[1] = NULL,
};
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 91a8d610ba0f..390ada8741bc 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -325,10 +325,33 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
return NULL;
}
-static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+/*
+ * Return true if @clp is done initializing, false if still working on it.
+ *
+ * Use nfs_client_init_status to check if it was successful.
+ */
+bool nfs_client_init_is_complete(const struct nfs_client *clp)
{
return clp->cl_cons_state <= NFS_CS_READY;
}
+EXPORT_SYMBOL_GPL(nfs_client_init_is_complete);
+
+/*
+ * Return 0 if @clp was successfully initialized, -errno otherwise.
+ *
+ * This must be called *after* nfs_client_init_is_complete() returns true,
+ * otherwise it will trip a WARN_ON_ONCE() and return -EINVAL.
+ */
+int nfs_client_init_status(const struct nfs_client *clp)
+{
+ /* called without checking nfs_client_init_is_complete */
+ if (clp->cl_cons_state > NFS_CS_READY) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ return clp->cl_cons_state;
+}
+EXPORT_SYMBOL_GPL(nfs_client_init_status);
int nfs_wait_client_init_complete(const struct nfs_client *clp)
{
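
/*
 * client.c now exposes a two-step probe: nfs_client_init_is_complete()
 * reports whether initialisation has finished, and only then may
 * nfs_client_init_status() be asked how it went.  A hedged sketch of
 * the expected caller (it would live inside fs/nfs, where internal.h
 * declares all three functions):
 */
static int wait_and_check(struct nfs_client *clp)
{
	int err = nfs_wait_client_init_complete(clp);

	if (err)
		return err;
	/* Only meaningful once initialisation is complete. */
	return nfs_client_init_status(clp);
}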
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index f956ca20a8a3..d913e818858f 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -266,6 +266,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs4_pnfs_ds *ret = ds;
struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
+ int status;
if (ds == NULL) {
printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
@@ -277,9 +278,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
if (ds->ds_clp)
goto out_test_devid;
- nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
dataserver_retrans, 4,
s->nfs_client->cl_minorversion);
+ if (status) {
+ nfs4_mark_deviceid_unavailable(devid);
+ ret = NULL;
+ goto out;
+ }
out_test_devid:
if (ret->ds_clp == NULL ||
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index f4f39b0ab09b..98b34c9b0564 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -175,7 +175,19 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
static inline bool
ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
{
- return nfs4_test_deviceid_unavailable(node);
+ /*
+ * Flexfiles should never mark a DS unavailable, but if it does,
+ * print a (ratelimited) warning, as this can affect performance.
+ */
+ if (nfs4_test_deviceid_unavailable(node)) {
+ u32 *p = (u32 *)node->deviceid.data;
+
+ pr_warn_ratelimited("NFS: flexfiles layout referencing an "
+ "unavailable device [%x%x%x%x]\n",
+ p[0], p[1], p[2], p[3]);
+ return true;
+ }
+ return false;
}
static inline int
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e5a6f248697b..85fde93dff77 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -384,6 +384,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
struct inode *ino = lseg->pls_layout->plh_inode;
struct nfs_server *s = NFS_SERVER(ino);
unsigned int max_payload;
+ int status;
if (!ff_layout_mirror_valid(lseg, mirror, true)) {
pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
@@ -404,7 +405,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
/* FIXME: For now we assume the server sent only one version of NFS
* to use for the DS.
*/
- nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
dataserver_retrans,
mirror->mirror_ds->ds_versions[0].version,
mirror->mirror_ds->ds_versions[0].minor_version);
@@ -420,11 +421,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
mirror->mirror_ds->ds_versions[0].wsize = max_payload;
goto out;
}
+out_fail:
ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
mirror, lseg->pls_range.offset,
lseg->pls_range.length, NFS4ERR_NXIO,
OP_ILLEGAL, GFP_NOIO);
-out_fail:
if (fail_return || !ff_layout_has_available_ds(lseg))
pnfs_error_mark_layout_for_return(ino, lseg);
ds = NULL;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 09ca5095c04e..7b38fedb7e03 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -186,6 +186,8 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *,
rpc_authflavor_t);
+extern bool nfs_client_init_is_complete(const struct nfs_client *clp);
+extern int nfs_client_init_status(const struct nfs_client *clp);
extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 5ae9d64ea08b..8346ccbf2d52 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1023,9 +1023,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
- if (server->rsize > server_resp_sz)
+ if (!server->rsize || server->rsize > server_resp_sz)
server->rsize = server_resp_sz;
- if (server->wsize > server_rqst_sz)
+ if (!server->wsize || server->wsize > server_rqst_sz)
server->wsize = server_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1b183686c6d4..c780d98035cc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2258,8 +2258,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
return 0;
- /* even though OPEN succeeded, access is denied. Close the file */
- nfs4_close_state(state, fmode);
return -EACCES;
}
@@ -7427,11 +7425,11 @@ static void nfs4_exchange_id_release(void *data)
struct nfs41_exchange_id_data *cdata =
(struct nfs41_exchange_id_data *)data;
- nfs_put_client(cdata->args.client);
if (cdata->xprt) {
xprt_put(cdata->xprt);
rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
}
+ nfs_put_client(cdata->args.client);
kfree(cdata->res.impl_id);
kfree(cdata->res.server_scope);
kfree(cdata->res.server_owner);
@@ -7538,10 +7536,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out_impl_id;
- }
+ if (IS_ERR(task))
+ return PTR_ERR(task);
if (!xprt) {
status = rpc_wait_for_completion_task(task);
@@ -7569,6 +7565,7 @@ out_server_owner:
kfree(calldata->res.server_owner);
out_calldata:
kfree(calldata);
+ nfs_put_client(clp);
goto out;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index f0369e362753..80ce289eea05 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3942,7 +3942,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
if (len <= 0)
goto out;
dprintk("%s: name=%s\n", __func__, group_name->data);
- return NFS_ATTR_FATTR_OWNER_NAME;
+ return NFS_ATTR_FATTR_GROUP_NAME;
} else {
len = xdr_stream_decode_opaque_inline(xdr, (void **)&p,
XDR_MAX_NETOBJ);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 63f77b49a586..590e1e35781f 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -367,7 +367,7 @@ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
gfp_t gfp_flags);
void nfs4_pnfs_v3_ds_connect_unload(void);
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
struct nfs4_deviceid_node *devid, unsigned int timeo,
unsigned int retrans, u32 version, u32 minor_version);
struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 9414b492439f..7250b95549ec 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -745,15 +745,17 @@ out:
/*
* Create an rpc connection to the nfs4_pnfs_ds data server.
* Currently only supports IPv4 and IPv6 addresses.
- * If connection fails, make devid unavailable.
+ * If connection fails, make devid unavailable and return a -errno.
*/
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
struct nfs4_deviceid_node *devid, unsigned int timeo,
unsigned int retrans, u32 version, u32 minor_version)
{
- if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
- int err = 0;
+ int err;
+again:
+ err = 0;
+ if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
if (version == 3) {
err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
retrans);
@@ -766,12 +768,29 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
err = -EPROTONOSUPPORT;
}
- if (err)
- nfs4_mark_deviceid_unavailable(devid);
nfs4_clear_ds_conn_bit(ds);
} else {
nfs4_wait_ds_connect(ds);
+
+ /* what was waited on didn't connect AND didn't mark unavail */
+ if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
+ goto again;
}
+
+ /*
+ * At this point the ds->ds_clp should be ready, but it might have
+ * hit an error.
+ */
+ if (!err) {
+ if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
+ WARN_ON_ONCE(ds->ds_clp ||
+ !nfs4_test_deviceid_unavailable(devid));
+ return -EINVAL;
+ }
+ err = nfs_client_init_status(ds->ds_clp);
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
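
/*
 * nfs4_pnfs_ds_connect() above gates setup on a test_and_set_bit()
 * "connecting" flag: one caller wins and connects, the rest wait, and a
 * waiter that wakes to find neither a client nor an unavailable mark
 * loops back and tries to become the connector itself.  Generic form of
 * the idiom (callback names invented; waking the waiters is assumed to
 * be done by the clear step, as nfs4_clear_ds_conn_bit() does):
 */
#include <linux/bitops.h>

#define CONNECTING_BIT 0

static int connect_once(unsigned long *state, int (*do_connect)(void),
			void (*wait_for_connect)(void), bool (*is_ready)(void))
{
	int err;
again:
	err = 0;
	if (!test_and_set_bit(CONNECTING_BIT, state)) {
		err = do_connect();		/* we won the race */
		clear_bit(CONNECTING_BIT, state);
	} else {
		wait_for_connect();		/* somebody else is on it */
		if (!is_ready())
			goto again;		/* woke to nothing: retry */
	}
	return err;
}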
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e75b056f46f4..abb2c8a3be42 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1784,7 +1784,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
(long long)req_offset(req));
if (status < 0) {
nfs_context_set_write_error(req->wb_context, status);
- nfs_inode_remove_request(req);
+ if (req->wb_page)
+ nfs_inode_remove_request(req);
dprintk_cont(", error = %d\n", status);
goto next;
}
@@ -1793,7 +1794,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
* returned by the server against all stored verfs. */
if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
/* We have a match */
- nfs_inode_remove_request(req);
+ if (req->wb_page)
+ nfs_inode_remove_request(req);
dprintk_cont(" OK\n");
goto next;
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 4348027384f5..d0ab7e56d0b4 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1863,7 +1863,7 @@ static int o2net_accept_one(struct socket *sock, int *more)
new_sock->type = sock->type;
new_sock->ops = sock->ops;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
if (ret < 0)
goto out;
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 1953986ee6bc..6e610a205e15 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/xattr.h>
-#include <linux/sched/signal.h>
#include "overlayfs.h"
#include "ovl_entry.h"
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 384fa759a563..c543cdb5f8ed 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -400,9 +400,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
clockid != CLOCK_BOOTTIME_ALARM))
return -EINVAL;
- if (!capable(CAP_WAKE_ALARM) &&
- (clockid == CLOCK_REALTIME_ALARM ||
- clockid == CLOCK_BOOTTIME_ALARM))
+ if ((clockid == CLOCK_REALTIME_ALARM ||
+ clockid == CLOCK_BOOTTIME_ALARM) &&
+ !capable(CAP_WAKE_ALARM))
return -EPERM;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -449,7 +449,7 @@ static int do_timerfd_settime(int ufd, int flags,
return ret;
ctx = f.file->private_data;
- if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
+ if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) {
fdput(f);
return -EPERM;
}
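
/*
 * The timerfd.c hunks only reorder conditions: capable() can set
 * PF_SUPERPRIV and emit audit/LSM records as a side effect, so the
 * cheap, side-effect-free test runs first and the capability is only
 * consulted when the request actually needs it.  Canonical shape:
 */
#include <linux/capability.h>
#include <linux/time.h>

static int check_alarm_clockid(int clockid)
{
	/* Only ask for CAP_WAKE_ALARM when an alarm clock was requested. */
	if ((clockid == CLOCK_REALTIME_ALARM ||
	     clockid == CLOCK_BOOTTIME_ALARM) &&
	    !capable(CAP_WAKE_ALARM))
		return -EPERM;
	return 0;
}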
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 973607df579d..1d227b0fcf49 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -138,8 +138,6 @@ out:
* userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
* context.
* @ctx: [in] Pointer to the userfaultfd context.
- *
- * Returns: In case of success, returns not zero.
*/
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
@@ -267,6 +265,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
{
struct mm_struct *mm = ctx->mm;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd, _pmd;
pte_t *pte;
@@ -277,7 +276,10 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(*p4d))
+ goto out;
+ pud = pud_offset(p4d, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
@@ -490,7 +492,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
* in such case.
*/
down_read(&mm->mmap_sem);
- ret = 0;
+ ret = VM_FAULT_NOPAGE;
}
}
@@ -527,10 +529,11 @@ out:
return ret;
}
-static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
- struct userfaultfd_wait_queue *ewq)
+static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+ struct userfaultfd_wait_queue *ewq)
{
- int ret = 0;
+ if (WARN_ON_ONCE(current->flags & PF_EXITING))
+ goto out;
ewq->ctx = ctx;
init_waitqueue_entry(&ewq->wq, current);
@@ -547,8 +550,16 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
break;
if (ACCESS_ONCE(ctx->released) ||
fatal_signal_pending(current)) {
- ret = -1;
__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
+ if (ewq->msg.event == UFFD_EVENT_FORK) {
+ struct userfaultfd_ctx *new;
+
+ new = (struct userfaultfd_ctx *)
+ (unsigned long)
+ ewq->msg.arg.reserved.reserved1;
+
+ userfaultfd_ctx_put(new);
+ }
break;
}
@@ -566,9 +577,8 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
* ctx may go away after this if the userfault pseudo fd is
* already released.
*/
-
+out:
userfaultfd_ctx_put(ctx);
- return ret;
}
static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
@@ -626,7 +636,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
return 0;
}
-static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
+static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
struct userfaultfd_ctx *ctx = fctx->orig;
struct userfaultfd_wait_queue ewq;
@@ -636,17 +646,15 @@ static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
ewq.msg.event = UFFD_EVENT_FORK;
ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
- return userfaultfd_event_wait_completion(ctx, &ewq);
+ userfaultfd_event_wait_completion(ctx, &ewq);
}
void dup_userfaultfd_complete(struct list_head *fcs)
{
- int ret = 0;
struct userfaultfd_fork_ctx *fctx, *n;
list_for_each_entry_safe(fctx, n, fcs, list) {
- if (!ret)
- ret = dup_fctx(fctx);
+ dup_fctx(fctx);
list_del(&fctx->list);
kfree(fctx);
}
@@ -689,8 +697,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
userfaultfd_event_wait_completion(ctx, &ewq);
}
-void userfaultfd_remove(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
+bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
@@ -699,13 +706,11 @@ void userfaultfd_remove(struct vm_area_struct *vma,
ctx = vma->vm_userfaultfd_ctx.ctx;
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
- return;
+ return true;
userfaultfd_ctx_get(ctx);
up_read(&mm->mmap_sem);
- *prev = NULL; /* We wait for ACK w/o the mmap semaphore */
-
msg_init(&ewq.msg);
ewq.msg.event = UFFD_EVENT_REMOVE;
@@ -714,7 +719,7 @@ void userfaultfd_remove(struct vm_area_struct *vma,
userfaultfd_event_wait_completion(ctx, &ewq);
- down_read(&mm->mmap_sem);
+ return false;
}
static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
@@ -775,34 +780,6 @@ void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
}
}
-void userfaultfd_exit(struct mm_struct *mm)
-{
- struct vm_area_struct *vma = mm->mmap;
-
- /*
- * We can do the vma walk without locking because the caller
- * (exit_mm) knows it now has exclusive access
- */
- while (vma) {
- struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
-
- if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) {
- struct userfaultfd_wait_queue ewq;
-
- userfaultfd_ctx_get(ctx);
-
- msg_init(&ewq.msg);
- ewq.msg.event = UFFD_EVENT_EXIT;
-
- userfaultfd_event_wait_completion(ctx, &ewq);
-
- ctx->features &= ~UFFD_FEATURE_EVENT_EXIT;
- }
-
- vma = vma->vm_next;
- }
-}
-
static int userfaultfd_release(struct inode *inode, struct file *file)
{
struct userfaultfd_ctx *ctx = file->private_data;
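
/*
 * userfaultfd_must_wait() above gains a p4d step because the kernel is
 * growing a fifth page-table level: every open-coded walk now goes
 * pgd -> p4d -> pud -> pmd (-> pte).  Skeleton of the updated walk; on
 * four-level architectures p4d_offset() folds back onto the pgd:
 */
#include <linux/mm.h>
#include <asm/pgtable.h>

static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	p4d_t *p4d;
	pud_t *pud;

	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return NULL;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return NULL;
	return pmd_offset(pud, address);
}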
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 2dfdc62f795e..70a5b55e0870 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -25,24 +25,6 @@
#include "kmem.h"
#include "xfs_message.h"
-/*
- * Greedy allocation. May fail and may return vmalloced memory.
- */
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
-{
- void *ptr;
- size_t kmsize = maxsize;
-
- while (!(ptr = vzalloc(kmsize))) {
- if ((kmsize >>= 1) <= minsize)
- kmsize = minsize;
- }
- if (ptr)
- *size = kmsize;
- return ptr;
-}
-
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 689f746224e7..f0fc84fcaac2 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -69,8 +69,6 @@ static inline void kmem_free(const void *ptr)
}
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
-
static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index a9c66d47757a..9bd104f32908 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree(
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
} else if (dfops->dop_low) {
-try_another_ag:
args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
args.fsbno = *firstblock;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -790,13 +790,17 @@ try_another_ag:
if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
args.fsbno == NULLFSBLOCK &&
args.type == XFS_ALLOCTYPE_NEAR_BNO) {
- dfops->dop_low = true;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
goto try_another_ag;
}
+ if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
+ xfs_iroot_realloc(ip, -1, whichfork);
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ return -ENOSPC;
+ }
/*
* Allocation can't fail, the space was reserved.
*/
- ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(*firstblock == NULLFSBLOCK ||
args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
@@ -4150,6 +4154,19 @@ xfs_bmapi_read(
return 0;
}
+/*
+ * Add a delayed allocation extent to an inode. Blocks are reserved from the
+ * global pool and the extent inserted into the inode in-core extent tree.
+ *
+ * On entry, got refers to the first extent beyond the offset of the extent to
+ * allocate or eof is specified if no such extent exists. On return, got refers
+ * to the extent record that was inserted to the inode fork.
+ *
+ * Note that the allocated extent may have been merged with contiguous extents
+ * during insertion into the inode fork. Thus, got does not reflect the current
+ * state of the inode fork on return. If necessary, the caller can use lastx to
+ * look up the updated record in the inode fork.
+ */
int
xfs_bmapi_reserve_delalloc(
struct xfs_inode *ip,
@@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc(
got->br_startblock = nullstartblock(indlen);
got->br_blockcount = alen;
got->br_state = XFS_EXT_NORM;
- xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
- /*
- * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
- * might have merged it into one of the neighbouring ones.
- */
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+ xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
/*
* Tag the inode if blocks were preallocated. Note that COW fork
@@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc(
if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
xfs_inode_set_cowblocks_tag(ip);
- ASSERT(got->br_startoff <= aoff);
- ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
- ASSERT(isnullstartblock(got->br_startblock));
- ASSERT(got->br_state == XFS_EXT_NORM);
return 0;
out_unreserve_blocks:
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index f93072b58a58..fd55db479385 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -447,8 +447,8 @@ xfs_bmbt_alloc_block(
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = be64_to_cpu(start->l);
-try_another_ag:
args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
/*
* Make sure there is sufficient room left in the AG to
* complete a full tree split for an extent insert. If
@@ -488,8 +488,8 @@ try_another_ag:
if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
args.fsbno == NULLFSBLOCK &&
args.type == XFS_ALLOCTYPE_NEAR_BNO) {
- cur->bc_private.b.dfops->dop_low = true;
args.fsbno = cur->bc_private.b.firstblock;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
goto try_another_ag;
}
@@ -506,7 +506,7 @@ try_another_ag:
goto error0;
cur->bc_private.b.dfops->dop_low = true;
}
- if (args.fsbno == NULLFSBLOCK) {
+ if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index d04547fcf274..eb00bc133bca 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,6 +125,8 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
+ int size);
/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index c6809ff41197..96b45cd6c63f 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -629,6 +629,93 @@ xfs_dir2_sf_check(
}
#endif /* DEBUG */
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *sfp,
+ int size)
+{
+ struct xfs_dir2_sf_entry *sfep;
+ struct xfs_dir2_sf_entry *next_sfep;
+ char *endp;
+ const struct xfs_dir_ops *dops;
+ xfs_ino_t ino;
+ int i;
+ int i8count;
+ int offset;
+ __uint8_t filetype;
+
+ dops = xfs_dir_get_ops(mp, NULL);
+
+ /*
+ * Give up if the directory is way too short.
+ */
+ XFS_WANT_CORRUPTED_RETURN(mp, size >
+ offsetof(struct xfs_dir2_sf_hdr, parent));
+ XFS_WANT_CORRUPTED_RETURN(mp, size >=
+ xfs_dir2_sf_hdr_size(sfp->i8count));
+
+ endp = (char *)sfp + size;
+
+ /* Check .. entry */
+ ino = dops->sf_get_parent_ino(sfp);
+ i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+ XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+ offset = dops->data_first_offset;
+
+ /* Check all reported entries */
+ sfep = xfs_dir2_sf_firstentry(sfp);
+ for (i = 0; i < sfp->count; i++) {
+ /*
+ * struct xfs_dir2_sf_entry has a variable length.
+ * Check the fixed-offset parts of the structure are
+ * within the data buffer.
+ */
+ XFS_WANT_CORRUPTED_RETURN(mp,
+ ((char *)sfep + sizeof(*sfep)) < endp);
+
+ /* Don't allow names with known bad length. */
+ XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
+ XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+
+ /*
+ * Check that the variable-length part of the structure is
+ * within the data buffer. The next entry starts after the
+ * name component, so nextentry is an acceptable test.
+ */
+ next_sfep = dops->sf_nextentry(sfp, sfep);
+ XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+
+ /* Check that the offsets always increase. */
+ XFS_WANT_CORRUPTED_RETURN(mp,
+ xfs_dir2_sf_get_offset(sfep) >= offset);
+
+ /* Check the inode number. */
+ ino = dops->sf_get_ino(sfp, sfep);
+ i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+ XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+
+ /* Check the file type. */
+ filetype = dops->sf_get_ftype(sfep);
+ XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+
+ offset = xfs_dir2_sf_get_offset(sfep) +
+ dops->data_entsize(sfep->namelen);
+
+ sfep = next_sfep;
+ }
+ XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
+ XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+
+ /* Make sure this whole thing ought to be in local format. */
+ XFS_WANT_CORRUPTED_RETURN(mp, offset +
+ (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+ (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+
+ return 0;
+}
+
/*
* Create a new (shortform) directory.
*/
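
/*
 * xfs_dir2_sf_verify() above is a model for validating a buffer of
 * variable-length on-disk records: bound the fixed header, then walk
 * the entries checking that the fixed part fits, the length field is
 * sane, the variable part fits, and the walk lands exactly on the end.
 * Stripped-down shape (struct rec is invented for the example):
 */
#include <linux/types.h>

struct rec {
	u8	namelen;
	char	name[];
};

static bool verify_records(const char *buf, size_t size)
{
	const char *p = buf, *end = buf + size;

	while (p < end) {
		const struct rec *r = (const struct rec *)p;

		if (p + sizeof(*r) > end)	/* fixed part in bounds? */
			return false;
		if (r->namelen == 0)		/* known-bad length? */
			return false;
		p += sizeof(*r) + r->namelen;	/* step over variable part */
		if (p > end)			/* variable part in bounds? */
			return false;
	}
	return p == end;			/* no trailing garbage */
}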
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 25c1e078aef6..9653e964eda4 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -33,6 +33,8 @@
#include "xfs_trace.h"
#include "xfs_attr_sf.h"
#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
kmem_zone_t *xfs_ifork_zone;
@@ -320,6 +322,7 @@ xfs_iformat_local(
int whichfork,
int size)
{
+ int error;
/*
* If the size is unreasonable, then something
@@ -336,6 +339,14 @@ xfs_iformat_local(
return -EFSCORRUPTED;
}
+ if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+ error = xfs_dir2_sf_verify(ip->i_mount,
+ (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
+ size);
+ if (error)
+ return error;
+ }
+
xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
return 0;
}
@@ -856,7 +867,7 @@ xfs_iextents_copy(
* In these cases, the format always takes precedence, because the
* format indicates the current state of the fork.
*/
-void
+int
xfs_iflush_fork(
xfs_inode_t *ip,
xfs_dinode_t *dip,
@@ -866,6 +877,7 @@ xfs_iflush_fork(
char *cp;
xfs_ifork_t *ifp;
xfs_mount_t *mp;
+ int error;
static const short brootflag[2] =
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
static const short dataflag[2] =
@@ -874,7 +886,7 @@ xfs_iflush_fork(
{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
if (!iip)
- return;
+ return 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
/*
* This can happen if we gave up in iformat in an error path,
@@ -882,12 +894,19 @@ xfs_iflush_fork(
*/
if (!ifp) {
ASSERT(whichfork == XFS_ATTR_FORK);
- return;
+ return 0;
}
cp = XFS_DFORK_PTR(dip, whichfork);
mp = ip->i_mount;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
case XFS_DINODE_FMT_LOCAL:
+ if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+ error = xfs_dir2_sf_verify(mp,
+ (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
+ ifp->if_bytes);
+ if (error)
+ return error;
+ }
if ((iip->ili_fields & dataflag[whichfork]) &&
(ifp->if_bytes > 0)) {
ASSERT(ifp->if_u1.if_data != NULL);
@@ -940,6 +959,7 @@ xfs_iflush_fork(
ASSERT(0);
break;
}
+ return 0;
}
/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 7fb8365326d1..132dc59fdde6 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
struct xfs_inode_log_item *, int);
void xfs_idestroy_fork(struct xfs_inode *, int);
void xfs_idata_realloc(struct xfs_inode *, int, int);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index bf65a9ea8642..61494295d92f 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -274,54 +274,49 @@ xfs_end_io(
struct xfs_ioend *ioend =
container_of(work, struct xfs_ioend, io_work);
struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ xfs_off_t offset = ioend->io_offset;
+ size_t size = ioend->io_size;
int error = ioend->io_bio->bi_error;
/*
- * Set an error if the mount has shut down and proceed with end I/O
- * processing so it can perform whatever cleanups are necessary.
+ * Just clean up the in-memory structures if the fs has been shut down.
*/
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
error = -EIO;
+ goto done;
+ }
/*
- * For a CoW extent, we need to move the mapping from the CoW fork
- * to the data fork. If instead an error happened, just dump the
- * new blocks.
+ * Clean up any COW blocks on an I/O error.
*/
- if (ioend->io_type == XFS_IO_COW) {
- if (error)
- goto done;
- if (ioend->io_bio->bi_error) {
- error = xfs_reflink_cancel_cow_range(ip,
- ioend->io_offset, ioend->io_size);
- goto done;
+ if (unlikely(error)) {
+ switch (ioend->io_type) {
+ case XFS_IO_COW:
+ xfs_reflink_cancel_cow_range(ip, offset, size, true);
+ break;
}
- error = xfs_reflink_end_cow(ip, ioend->io_offset,
- ioend->io_size);
- if (error)
- goto done;
+
+ goto done;
}
/*
- * For unwritten extents we need to issue transactions to convert a
- * range to normal written extens after the data I/O has finished.
- * Detecting and handling completion IO errors is done individually
- * for each case as different cleanup operations need to be performed
- * on error.
+ * Success: commit the COW or unwritten blocks if needed.
*/
- if (ioend->io_type == XFS_IO_UNWRITTEN) {
- if (error)
- goto done;
- error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
- ioend->io_size);
- } else if (ioend->io_append_trans) {
- error = xfs_setfilesize_ioend(ioend, error);
- } else {
- ASSERT(!xfs_ioend_is_append(ioend) ||
- ioend->io_type == XFS_IO_COW);
+ switch (ioend->io_type) {
+ case XFS_IO_COW:
+ error = xfs_reflink_end_cow(ip, offset, size);
+ break;
+ case XFS_IO_UNWRITTEN:
+ error = xfs_iomap_write_unwritten(ip, offset, size);
+ break;
+ default:
+ ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+ break;
}
done:
+ if (ioend->io_append_trans)
+ error = xfs_setfilesize_ioend(ioend, error);
xfs_destroy_ioend(ioend, error);
}
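
/*
 * xfs_end_io() is reshaped above into "shutdown and error cleanups
 * first, then one success action per I/O type, then common completion".
 * A self-contained skeleton of that control flow; every helper below is
 * an illustrative stand-in, not part of the patch:
 */
enum io_type { IO_COW, IO_UNWRITTEN, IO_PLAIN };

static int commit_cow(void)        { return 0; }	/* stand-in */
static int convert_unwritten(void) { return 0; }	/* stand-in */
static void cancel_cow(void)       { }			/* stand-in */

static int end_io(enum io_type type, int error)
{
	if (error) {
		/* Bail-out path: only undo per-type state. */
		if (type == IO_COW)
			cancel_cow();
		goto done;
	}
	/* Success path: one commit action per I/O type. */
	switch (type) {
	case IO_COW:       error = commit_cow(); break;
	case IO_UNWRITTEN: error = convert_unwritten(); break;
	default:           break;
	}
done:
	return error;	/* common completion (setfilesize etc.) goes here */
}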
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 003a99b83bd8..ad9396e516f6 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
struct xfs_da_geometry *geo = args->geo;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Give up if the directory is way too short.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
-
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
- return -EFSCORRUPTED;
-
/*
* If the block number in the offset is out of range, we're done.
*/
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 7234b9748c36..3531f8f72fa5 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1608,7 +1608,7 @@ xfs_inode_free_cowblocks(
xfs_ilock(ip, XFS_IOLOCK_EXCL);
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+ ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index edfa6a55b064..c7fe2c2123ab 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1615,7 +1615,7 @@ xfs_itruncate_extents(
/* Remove all pending CoW reservations. */
error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
- last_block);
+ last_block, true);
if (error)
goto out;
@@ -3475,6 +3475,7 @@ xfs_iflush_int(
struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_dinode *dip;
struct xfs_mount *mp = ip->i_mount;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
@@ -3557,9 +3558,14 @@ xfs_iflush_int(
if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
ip->i_d.di_flushiter = 0;
- xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
- if (XFS_IFORK_Q(ip))
- xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+ error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+ if (error)
+ return error;
+ if (XFS_IFORK_Q(ip)) {
+ error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+ if (error)
+ return error;
+ }
xfs_inobp_check(mp, bp);
/*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 41662fb14e87..288ee5b840d7 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -630,6 +630,11 @@ retry:
goto out_unlock;
}
+ /*
+ * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+ * them out if the write happens to fail.
+ */
+ iomap->flags = IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
if (isnullstartblock(got.br_startblock))
@@ -1071,16 +1076,22 @@ xfs_file_iomap_end_delalloc(
struct xfs_inode *ip,
loff_t offset,
loff_t length,
- ssize_t written)
+ ssize_t written,
+ struct iomap *iomap)
{
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t start_fsb;
xfs_fileoff_t end_fsb;
int error = 0;
- /* behave as if the write failed if drop writes is enabled */
- if (xfs_mp_drop_writes(mp))
+ /*
+ * Behave as if the write failed if drop writes is enabled. Set the NEW
+ * flag to force delalloc cleanup.
+ */
+ if (xfs_mp_drop_writes(mp)) {
+ iomap->flags |= IOMAP_F_NEW;
written = 0;
+ }
/*
* start_fsb refers to the first unused block after a short write. If
@@ -1094,14 +1105,14 @@ xfs_file_iomap_end_delalloc(
end_fsb = XFS_B_TO_FSB(mp, offset + length);
/*
- * Trim back delalloc blocks if we didn't manage to write the whole
- * range reserved.
+ * Trim delalloc blocks if they were allocated by this write and we
+ * didn't manage to write the whole range.
*
* We don't need to care about racing delalloc as we hold i_mutex
* across the reserve/allocate/unreserve calls. If there are delalloc
* blocks in the range, they are ours.
*/
- if (start_fsb < end_fsb) {
+ if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
XFS_FSB_TO_B(mp, end_fsb) - 1);
@@ -1131,7 +1142,7 @@ xfs_file_iomap_end(
{
if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
- length, written);
+ length, written, iomap);
return 0;
}
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 66e881790c17..2a6d9b1558e0 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -361,7 +361,6 @@ xfs_bulkstat(
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
- size_t irbsize; /* size of irec buffer in bytes */
xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
int nirbuf; /* size of irbuf */
int ubcount; /* size of user's buffer */
@@ -388,11 +387,10 @@ xfs_bulkstat(
*ubcountp = 0;
*done = 0;
- irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+ irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
if (!irbuf)
return -ENOMEM;
-
- nirbuf = irbsize / sizeof(*irbuf);
+ nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
/*
* Loop over the allocation groups, starting from the last
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 450bde68bb75..688ebff1f663 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -513,8 +513,7 @@ STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
if (xfs_sb_version_hasalign(&mp->m_sb) &&
- mp->m_sb.sb_inoalignmt >=
- XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+ mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
else
mp->m_inoalign_mask = 0;
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index da6d08fb359c..4a84c5ea266d 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -548,14 +548,18 @@ xfs_reflink_trim_irec_to_next_cow(
}
/*
- * Cancel all pending CoW reservations for some block range of an inode.
+ * Cancel CoW reservations for some block range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
*/
int
xfs_reflink_cancel_cow_blocks(
struct xfs_inode *ip,
struct xfs_trans **tpp,
xfs_fileoff_t offset_fsb,
- xfs_fileoff_t end_fsb)
+ xfs_fileoff_t end_fsb,
+ bool cancel_real)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got, del;
@@ -579,7 +583,7 @@ xfs_reflink_cancel_cow_blocks(
&idx, &got, &del);
if (error)
break;
- } else {
+ } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
xfs_trans_ijoin(*tpp, ip, 0);
xfs_defer_init(&dfops, &firstfsb);
@@ -621,13 +625,17 @@ xfs_reflink_cancel_cow_blocks(
}
/*
- * Cancel all pending CoW reservations for some byte range of an inode.
+ * Cancel CoW reservations for some byte range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
*/
int
xfs_reflink_cancel_cow_range(
struct xfs_inode *ip,
xfs_off_t offset,
- xfs_off_t count)
+ xfs_off_t count,
+ bool cancel_real)
{
struct xfs_trans *tp;
xfs_fileoff_t offset_fsb;
@@ -653,7 +661,8 @@ xfs_reflink_cancel_cow_range(
xfs_trans_ijoin(tp, ip, 0);
/* Scrape out the old CoW reservations */
- error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
+ error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
+ cancel_real);
if (error)
goto out_cancel;
@@ -1450,7 +1459,7 @@ next:
* We didn't find any shared blocks so turn off the reflink flag.
* First, get rid of any leftover CoW mappings.
*/
- error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF);
+ error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
if (error)
return error;
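
The new cancel_real flag above distinguishes two kinds of callers: paths that must tear down every CoW fork extent (xfs_fs_destroy_inode passes true below) and paths that may only drop speculative reservations while leaving real, already-allocated extents in place. A toy sketch of the gating logic, with stand-in types in place of the XFS extent iterators and transactions:

#include <stdbool.h>
#include <stdio.h>

enum ext_state { EXT_NORM, EXT_UNWRITTEN };  /* stand-in for XFS_EXT_* */

struct extent { enum ext_state state; };

static void cancel_cow_extents(struct extent *ext, int n, bool cancel_real)
{
    for (int i = 0; i < n; i++) {
        /* Delalloc handling elided; this is the new gate. */
        if (ext[i].state == EXT_UNWRITTEN || cancel_real)
            printf("freeing extent %d\n", i);
        else
            printf("keeping real extent %d\n", i);
    }
}

int main(void)
{
    struct extent e[2] = { { EXT_UNWRITTEN }, { EXT_NORM } };

    cancel_cow_extents(e, 2, false);    /* keep real extents */
    cancel_cow_extents(e, 2, true);     /* e.g. inode eviction */
    return 0;
}
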
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 33ac9b8db683..d29a7967f029 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -39,9 +39,9 @@ extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
- xfs_fileoff_t end_fsb);
+ xfs_fileoff_t end_fsb, bool cancel_real);
extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
- xfs_off_t count);
+ xfs_off_t count, bool cancel_real);
extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 890862f2447c..685c042a120f 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -953,7 +953,7 @@ xfs_fs_destroy_inode(
XFS_STATS_INC(ip->i_mount, vn_remove);
if (xfs_is_reflink_inode(ip)) {
- error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+ error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
xfs_warn(ip->i_mount,
"Error %d while evicting CoW blocks for inode %llu.",
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 5bdab6bffd23..928fd66b1271 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -15,7 +15,6 @@
((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
NULL: pmd_offset(pud, address))
-#define pud_alloc(mm, pgd, address) (pgd)
#define pud_offset(pgd, start) (pgd)
#define pud_none(pud) 0
#define pud_bad(pud) 0
@@ -35,4 +34,6 @@
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
+#include <asm-generic/5level-fixup.h>
+
#endif
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
new file mode 100644
index 000000000000..b5ca82dc4175
--- /dev/null
+++ b/include/asm-generic/5level-fixup.h
@@ -0,0 +1,41 @@
+#ifndef _5LEVEL_FIXUP_H
+#define _5LEVEL_FIXUP_H
+
+#define __ARCH_HAS_5LEVEL_HACK
+#define __PAGETABLE_P4D_FOLDED
+
+#define P4D_SHIFT PGDIR_SHIFT
+#define P4D_SIZE PGDIR_SIZE
+#define P4D_MASK PGDIR_MASK
+#define PTRS_PER_P4D 1
+
+#define p4d_t pgd_t
+
+#define pud_alloc(mm, p4d, address) \
+ ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
+ NULL : pud_offset(p4d, address))
+
+#define p4d_alloc(mm, pgd, address) (pgd)
+#define p4d_offset(pgd, start) (pgd)
+#define p4d_none(p4d) 0
+#define p4d_bad(p4d) 0
+#define p4d_present(p4d) 1
+#define p4d_ERROR(p4d) do { } while (0)
+#define p4d_clear(p4d) pgd_clear(p4d)
+#define p4d_val(p4d) pgd_val(p4d)
+#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
+#define p4d_page(p4d) pgd_page(p4d)
+#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
+
+#define __p4d(x) __pgd(x)
+#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d)
+
+#undef p4d_free_tlb
+#define p4d_free_tlb(tlb, x, addr) do { } while (0)
+#define p4d_free(mm, x) do { } while (0)
+#define __p4d_free_tlb(tlb, x, addr) do { } while (0)
+
+#undef p4d_addr_end
+#define p4d_addr_end(addr, end) (end)
+
+#endif
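
5level-fixup.h lets architectures that still use the 4level hack keep compiling against five-level-aware generic code by aliasing the p4d level onto the pgd: p4d_t is literally pgd_t and p4d_offset() returns its argument. A userspace sketch of the folding idea, using toy types rather than the kernel's:

#include <stdio.h>

typedef struct { unsigned long v; } pgd_t;
typedef pgd_t p4d_t;                /* the fixup's "#define p4d_t pgd_t" */

/* Mirrors "#define p4d_offset(pgd, start) (pgd)": the folded level is
 * the same table entry, so the extra walk step costs nothing. */
static p4d_t *p4d_offset(pgd_t *pgd, unsigned long addr)
{
    (void)addr;
    return pgd;
}

int main(void)
{
    pgd_t pgd = { 0xabc };
    p4d_t *p4d = p4d_offset(&pgd, 0x1000);

    printf("same entry: %d\n", (void *)p4d == (void *)&pgd);
    return 0;
}
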
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
new file mode 100644
index 000000000000..752fb7511750
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -0,0 +1,62 @@
+#ifndef _PGTABLE_NOP4D_HACK_H
+#define _PGTABLE_NOP4D_HACK_H
+
+#ifndef __ASSEMBLY__
+#include <asm-generic/5level-fixup.h>
+
+#define __PAGETABLE_PUD_FOLDED
+
+/*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { pgd_t pgd; } pud_t;
+
+#define PUD_SHIFT PGDIR_SHIFT
+#define PTRS_PER_PUD 1
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
+
+#define pgd_populate(mm, pgd, pud) do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pud_t *)pgd;
+}
+
+#define pud_val(x) (pgd_val((x).pgd))
+#define __pud(x) ((pud_t) { __pgd(x) })
+
+#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
+#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address) NULL
+#define pud_free(mm, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, a) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
new file mode 100644
index 000000000000..de364ecb8df6
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -0,0 +1,56 @@
+#ifndef _PGTABLE_NOP4D_H
+#define _PGTABLE_NOP4D_H
+
+#ifndef __ASSEMBLY__
+
+#define __PAGETABLE_P4D_FOLDED
+
+typedef struct { pgd_t pgd; } p4d_t;
+
+#define P4D_SHIFT PGDIR_SHIFT
+#define PTRS_PER_P4D 1
+#define P4D_SIZE (1UL << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the p4d is never bad, and a p4d always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
+
+#define pgd_populate(mm, pgd, p4d) do { } while (0)
+/*
+ * (p4ds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval })
+
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ return (p4d_t *)pgd;
+}
+
+#define p4d_val(x) (pgd_val((x).pgd))
+#define __p4d(x) ((p4d_t) { __pgd(x) })
+
+#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
+
+/*
+ * allocating and freeing a p4d is trivial: the 1-entry p4d is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define p4d_alloc_one(mm, address) NULL
+#define p4d_free(mm, x) do { } while (0)
+#define __p4d_free_tlb(tlb, x, a) do { } while (0)
+
+#undef p4d_addr_end
+#define p4d_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_H */
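
pgtable-nop4d.h uses the same wrapper-struct trick as the existing nopud/nopmd headers: making p4d_t contain a pgd_t keeps the sizes identical, so a pgd entry can be viewed as the folded level without going through integer casts, and p4d_val() just unwraps. A compilable sketch with toy types:

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { pgd_t pgd; } p4d_t;   /* folded level wraps the pgd */

static p4d_t *p4d_offset(pgd_t *pgd, unsigned long addr)
{
    (void)addr;
    return (p4d_t *)pgd;               /* same storage, new view */
}

int main(void)
{
    pgd_t entry = { .pgd = 42 };
    p4d_t *p4d = p4d_offset(&entry, 0);

    assert(sizeof(p4d_t) == sizeof(pgd_t));
    printf("p4d value: %lu\n", p4d->pgd.pgd);  /* p4d_val() unwraps */
    return 0;
}
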
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index 810431d8351b..c2b9b96d6268 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -3,52 +3,57 @@
#ifndef __ASSEMBLY__
+#ifdef __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nop4d-hack.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+
#define __PAGETABLE_PUD_FOLDED
/*
- * Having the pud type consist of a pgd gets the size right, and allows
- * us to conceptually access the pgd entry that this pud is folded into
+ * Having the pud type consist of a p4d gets the size right, and allows
+ * us to conceptually access the p4d entry that this pud is folded into
* without casting.
*/
-typedef struct { pgd_t pgd; } pud_t;
+typedef struct { p4d_t p4d; } pud_t;
-#define PUD_SHIFT PGDIR_SHIFT
+#define PUD_SHIFT P4D_SHIFT
#define PTRS_PER_PUD 1
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * The "p4d_xxx()" functions here are trivial for a folded two-level
* setup: the pud is never bad, and a pud always exists (as it's folded
- * into the pgd entry)
+ * into the p4d entry)
*/
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline void pgd_clear(pgd_t *pgd) { }
-#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
+static inline int p4d_none(p4d_t p4d) { return 0; }
+static inline int p4d_bad(p4d_t p4d) { return 0; }
+static inline int p4d_present(p4d_t p4d) { return 1; }
+static inline void p4d_clear(p4d_t *p4d) { }
+#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
-#define pgd_populate(mm, pgd, pud) do { } while (0)
+#define p4d_populate(mm, p4d, pud) do { } while (0)
/*
- * (puds are folded into pgds so this doesn't get actually called,
+ * (puds are folded into p4ds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
*/
-#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval })
-static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
- return (pud_t *)pgd;
+ return (pud_t *)p4d;
}
-#define pud_val(x) (pgd_val((x).pgd))
-#define __pud(x) ((pud_t) { __pgd(x) } )
+#define pud_val(x) (p4d_val((x).p4d))
+#define __pud(x) ((pud_t) { __p4d(x) })
-#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
-#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
+#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
+#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
/*
* allocating and freeing a pud is trivial: the 1-entry pud is
- * inside the pgd, so has no extra memory associated with it.
+ * inside the p4d, so has no extra memory associated with it.
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
@@ -58,4 +63,5 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
#define pud_addr_end(addr, end) (end)
#endif /* __ASSEMBLY__ */
+#endif /* !__ARCH_USE_5LEVEL_HACK */
#endif /* _PGTABLE_NOPUD_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f4ca23b158b3..1fad160f35de 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -10,9 +10,9 @@
#include <linux/bug.h>
#include <linux/errno.h>
-#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
- CONFIG_PGTABLE_LEVELS
-#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif
/*
@@ -424,6 +424,13 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
@@ -444,6 +451,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
* Do the tests inline, but report and clear the bad entry in mm/memory.c.
*/
void pgd_clear_bad(pgd_t *);
+void p4d_clear_bad(p4d_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);
@@ -458,6 +466,17 @@ static inline int pgd_none_or_clear_bad(pgd_t *pgd)
return 0;
}
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+ if (p4d_none(*p4d))
+ return 1;
+ if (unlikely(p4d_bad(*p4d))) {
+ p4d_clear_bad(p4d);
+ return 1;
+ }
+ return 0;
+}
+
static inline int pud_none_or_clear_bad(pud_t *pud)
{
if (pud_none(*pud))
@@ -844,11 +863,30 @@ static inline int pmd_protnone(pmd_t pmd)
#endif /* CONFIG_MMU */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+int p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
return 0;
@@ -857,6 +895,10 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
static inline int pud_clear_huge(pud_t *pud)
{
return 0;
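
p4d_addr_end() slots the new level into the canonical mm walker loop: each iteration clamps the step to either the end of the current p4d entry or the end of the requested range, whichever comes first. A standalone sketch of that loop, assuming a 64-bit build and a stand-in shift value:

#include <stdio.h>

#define P4D_SIZE (1UL << 39)            /* stand-in shift, 64-bit build */
#define P4D_MASK (~(P4D_SIZE - 1))

static unsigned long p4d_addr_end(unsigned long addr, unsigned long end)
{
    unsigned long boundary = (addr + P4D_SIZE) & P4D_MASK;

    return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
    unsigned long addr = 0, end = 3 * P4D_SIZE + 123;

    do {                                /* classic mm walk loop shape */
        unsigned long next = p4d_addr_end(addr, end);

        printf("walk p4d range [%#lx, %#lx)\n", addr, next);
        addr = next;
    } while (addr != end);
    return 0;
}
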
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 4329bc6ef04b..8afa4335e5b2 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -270,6 +270,12 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
__pte_free_tlb(tlb, ptep, address); \
} while (0)
+#define pmd_free_tlb(tlb, pmdp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __pmd_free_tlb(tlb, pmdp, address); \
+ } while (0)
+
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address) \
do { \
@@ -278,11 +284,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
} while (0)
#endif
-#define pmd_free_tlb(tlb, pmdp, address) \
+#ifndef __ARCH_HAS_5LEVEL_HACK
+#define p4d_free_tlb(tlb, p4dp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
- __pmd_free_tlb(tlb, pmdp, address); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __p4d_free_tlb(tlb, p4dp, address); \
} while (0)
+#endif
#define tlb_migrate_finish(mm) do {} while (0)
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index a2bfd7843f18..e2b9c6fe2714 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
-int af_alg_accept(struct sock *sk, struct socket *newsock);
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h
index 399a123aed58..db69d84ed7d1 100644
--- a/include/dt-bindings/sound/cs42l42.h
+++ b/include/dt-bindings/sound/cs42l42.h
@@ -20,7 +20,7 @@
#define CS42L42_HPOUT_LOAD_1NF 0
#define CS42L42_HPOUT_LOAD_10NF 1
-/* HPOUT Clamp to GND Overide */
+/* HPOUT Clamp to GND Override */
#define CS42L42_HPOUT_CLAMP_EN 0
#define CS42L42_HPOUT_CLAMP_DIS 1
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 673acda012af..9b05886f9773 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
}
/* Validate the processor object's proc_id */
-bool acpi_processor_validate_proc_id(int proc_id);
+bool acpi_duplicate_processor_id(int proc_id);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
int *pcpu);
int acpi_unmap_cpu(int cpu);
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-void acpi_set_processor_mapping(void);
-
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 796016e63c1d..5a7da607ca04 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -435,7 +435,6 @@ struct request_queue {
struct delayed_work delay_work;
struct backing_dev_info *backing_dev_info;
- struct disk_devt *disk_devt;
/*
* The queue owner gets to use this for whatever they like.
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 1816c5e26581..88cd5dc8e238 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -48,6 +48,7 @@ struct ceph_options {
unsigned long mount_timeout; /* jiffies */
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
+ unsigned long osd_request_timeout; /* jiffies */
/*
* any type that can't be simply compared or doesn't need
@@ -68,6 +69,7 @@ struct ceph_options {
#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
+#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */
#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 2ea0c282f3dc..c125b5d9e13c 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -189,6 +189,7 @@ struct ceph_osd_request {
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
+ unsigned long r_start_stamp; /* jiffies */
int r_attempts;
struct ceph_eversion r_replay_version; /* aka reassert_version */
u32 r_last_force_resend;
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61d042bbbf60..68449293c4b6 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -163,6 +163,7 @@ struct dccp_request_sock {
__u64 dreq_isr;
__u64 dreq_gsr;
__be32 dreq_service;
+ spinlock_t dreq_lock;
struct list_head dreq_featneg;
__u32 dreq_timestamp_echo;
__u32 dreq_timestamp_time;
diff --git a/include/linux/device.h b/include/linux/device.h
index 30c4570e928d..9ef518af5515 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
-void assert_held_device_hotplug(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0c167fdee5f7..fbf7b39e8103 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -409,6 +409,7 @@ struct bpf_prog {
u16 pages; /* Number of allocated pages */
kmemcheck_bitfield_begin(meta);
u16 jited:1, /* Is our filter JIT'ed? */
+ locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
@@ -554,22 +555,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
- set_memory_ro((unsigned long)fp, fp->pages);
+ fp->locked = 1;
+ WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
- set_memory_rw((unsigned long)fp, fp->pages);
+ if (fp->locked) {
+ WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
+ /* In case set_memory_rw() fails, we want to be the first
+ * to crash here instead of some random place later on.
+ */
+ fp->locked = 0;
+ }
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
- set_memory_ro((unsigned long)hdr, hdr->pages);
+ WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
}
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
- set_memory_rw((unsigned long)hdr, hdr->pages);
+ WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
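
The filter.h hunk adds a locked bit so that bpf_prog_unlock_ro() only flips protections that bpf_prog_lock_ro() actually applied, and wraps the set_memory_*() calls in WARN_ON_ONCE() so failures surface early. A userspace sketch of the same pattern, with mprotect() standing in for set_memory_ro/rw:

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

struct prog {
    void *image;
    size_t len;
    bool locked;
};

static void prog_lock_ro(struct prog *p)
{
    p->locked = true;                   /* record intent before locking */
    if (mprotect(p->image, p->len, PROT_READ))
        perror("mprotect ro");          /* kernel WARN_ON_ONCE()s here */
}

static void prog_unlock_ro(struct prog *p)
{
    if (p->locked) {                    /* only undo what we did */
        if (mprotect(p->image, p->len, PROT_READ | PROT_WRITE))
            perror("mprotect rw");
        p->locked = false;
    }
}

int main(void)
{
    struct prog p = { .len = 4096, .locked = false };

    p.image = mmap(NULL, p.len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p.image == MAP_FAILED)
        return 1;
    prog_lock_ro(&p);
    prog_unlock_ro(&p);                 /* safe even if never locked */
    munmap(p.image, p.len);
    return 0;
}
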
diff --git a/include/linux/fs.h b/include/linux/fs.h
index aad3fd0ff5f8..7251f7bb45e8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2678,7 +2678,7 @@ static const char * const kernel_read_file_str[] = {
static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
{
- if (id < 0 || id >= READING_MAX_ID)
+ if ((unsigned)id >= READING_MAX_ID)
return kernel_read_file_str[READING_UNKNOWN];
return kernel_read_file_str[id];
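
The fs.h change collapses the signed two-sided range check into a single unsigned comparison: a negative id converts to a very large unsigned value, so one >= test covers both ends. A small illustration with a stand-in enum:

#include <stdio.h>

enum read_id { READING_UNKNOWN, READING_FIRMWARE, READING_MAX_ID };

static const char *names[READING_MAX_ID] = { "unknown", "firmware" };

static const char *id_str(enum read_id id)
{
    if ((unsigned int)id >= READING_MAX_ID)  /* also catches id < 0 */
        return names[READING_UNKNOWN];
    return names[id];
}

int main(void)
{
    printf("%s %s\n", id_str(READING_FIRMWARE), id_str((enum read_id)-1));
    return 0;
}
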
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a999d281a2f1..76f39754e7b0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -167,13 +167,6 @@ struct blk_integrity {
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-struct disk_devt {
- atomic_t count;
- void (*release)(struct disk_devt *disk_devt);
-};
-
-void put_disk_devt(struct disk_devt *disk_devt);
-void get_disk_devt(struct disk_devt *disk_devt);
struct gendisk {
/* major, first_minor and minors are input parameters only,
@@ -183,7 +176,6 @@ struct gendisk {
int first_minor;
int minors; /* maximum number of minors, =1 for
* disks that can't be partitioned. */
- struct disk_devt *disk_devt;
char disk_name[DISK_NAME_LEN]; /* name of major driver */
char *(*devnode)(struct gendisk *gd, umode_t *mode);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 503099d8aada..b857fc8cc2ec 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -122,7 +122,7 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
-int pud_huge(pud_t pmd);
+int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
@@ -197,6 +197,9 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
#ifndef pgd_huge
#define pgd_huge(x) 0
#endif
+#ifndef p4d_huge
+#define p4d_huge(x) 0
+#endif
#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 672cfef72fc8..97cbca19430d 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -373,6 +373,8 @@
#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT)
#define ICC_IGRPEN1_EL1_SHIFT 0
#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT)
+#define ICC_SRE_EL1_DIB (1U << 2)
+#define ICC_SRE_EL1_DFB (1U << 1)
#define ICC_SRE_EL1_SRE (1U << 0)
/*
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 188eced6813e..9f3616085423 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -524,6 +524,10 @@ static inline struct irq_domain *irq_find_matching_fwnode(
{
return NULL;
}
+static inline bool irq_domain_check_msi_remap(void)
+{
+ return false;
+}
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 8e06d758ee48..2afd74b9d844 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -90,6 +90,13 @@ extern bool static_key_initialized;
struct static_key {
atomic_t enabled;
/*
+ * Note:
+ * To make anonymous unions work with old compilers, the static
+ * initialization of them requires brackets. This creates a dependency
+ * on the order of the struct with the initializers. If any fields
+ * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
+ * to be modified.
+ *
* bit 0 => 1 if key is initially true
* 0 if initially false
* bit 1 => 1 if points to struct static_key_mod
@@ -166,10 +173,10 @@ extern void static_key_disable(struct static_key *key);
*/
#define STATIC_KEY_INIT_TRUE \
{ .enabled = { 1 }, \
- .entries = (void *)JUMP_TYPE_TRUE }
+ { .entries = (void *)JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE \
{ .enabled = { 0 }, \
- .entries = (void *)JUMP_TYPE_FALSE }
+ { .entries = (void *)JUMP_TYPE_FALSE } }
#else /* !HAVE_JUMP_LABEL */
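
The jump_label hunk adds an extra brace level because, as the new comment explains, older compilers reject positional initialization of an anonymous union without its own braces. A minimal illustration of the constraint with a toy struct (int stands in for atomic_t, and (void *)0x1 for a JUMP_TYPE_* value):

#include <stdio.h>

struct key {
    int enabled;                  /* stands in for atomic_t enabled */
    union {                       /* anonymous union, as in static_key */
        void *entries;
        struct key *next;
    };
};

/* The inner braces are the point: without them, old GCC refuses the
 * positional initialization of the anonymous union. */
static struct key k = { 1, { (void *)0x1 } };

int main(void)
{
    printf("enabled=%d entries=%p\n", k.enabled, k.entries);
    return 0;
}
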
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index ceb3fe78a0d3..5734480c9590 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -6,6 +6,7 @@
struct kmem_cache;
struct page;
struct vm_struct;
+struct task_struct;
#ifdef CONFIG_KASAN
@@ -18,6 +19,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];
void kasan_populate_zero_shadow(const void *shadow_start,
const void *shadow_end);
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index b01fe1009084..87ff4f58a2f0 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -29,6 +29,11 @@ struct hlist_nulls_node {
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_nulls_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
+ })
/**
* ptr_is_a_nulls - Test if a ptr is a nulls
* @ptr: ptr to be tested
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0d65dd72c0f4..5f01c88f0800 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1560,14 +1560,24 @@ static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
return ptep;
}
+#ifdef __PAGETABLE_P4D_FOLDED
+static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long address)
+{
+ return 0;
+}
+#else
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+#endif
+
#ifdef __PAGETABLE_PUD_FOLDED
-static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{
return 0;
}
#else
-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
#endif
#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
@@ -1619,11 +1629,22 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
* Remove it when 4level-fixup.h has been removed.
*/
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
-static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+
+#ifndef __ARCH_HAS_5LEVEL_HACK
+static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long address)
+{
+ return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
+ NULL : p4d_offset(pgd, address);
+}
+
+static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
+ unsigned long address)
{
- return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
- NULL: pud_offset(pgd, address);
+ return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
+ NULL : pud_offset(p4d, address);
}
+#endif /* !__ARCH_HAS_5LEVEL_HACK */
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
@@ -2385,7 +2406,8 @@ void sparse_mem_maps_populate_node(struct page **map_map,
struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
-pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
diff --git a/include/linux/net.h b/include/linux/net.h
index cd0c8bd0a1de..0620f5e18c96 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -146,7 +146,7 @@ struct proto_ops {
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
int (*accept) (struct socket *sock,
- struct socket *newsock, int flags);
+ struct socket *newsock, int flags, bool kern);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int *sockaddr_len, int peer);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 772476028a65..43a774873aa9 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -837,6 +837,10 @@ int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_soft_reset(struct phy_device *phydev);
+static inline int genphy_no_soft_reset(struct phy_device *phydev)
+{
+ return 0;
+}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h
new file mode 100644
index 000000000000..d60d4e278609
--- /dev/null
+++ b/include/linux/purgatory.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_PURGATORY_H
+#define _LINUX_PURGATORY_H
+
+#include <linux/types.h>
+#include <crypto/sha.h>
+#include <uapi/linux/kexec.h>
+
+struct kexec_sha_region {
+ unsigned long start;
+ unsigned long len;
+};
+
+/*
+ * These forward declarations serve two purposes:
+ *
+ * 1) Make sparse happy when checking arch/purgatory
+ * 2) Document that these are required to be global so the symbol
+ * lookup in kexec works
+ */
+extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
+extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
+
+#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index 7bd2403e4fef..ed5c3838780d 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -37,14 +37,26 @@ extern void get_random_bytes(void *buf, int nbytes);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
-extern int random_int_secret_init(void);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif
-unsigned int get_random_int(void);
-unsigned long get_random_long(void);
+u32 get_random_u32(void);
+u64 get_random_u64(void);
+static inline unsigned int get_random_int(void)
+{
+ return get_random_u32();
+}
+static inline unsigned long get_random_long(void)
+{
+#if BITS_PER_LONG == 64
+ return get_random_u64();
+#else
+ return get_random_u32();
+#endif
+}
+
unsigned long randomize_page(unsigned long start, unsigned long range);
u32 prandom_u32(void);
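
get_random_int() and get_random_long() become thin wrappers that dispatch on word size. A userspace sketch of the dispatch, with rand() standing in for the kernel's batched get_random_u32()/get_random_u64() generators and a ULONG_MAX test standing in for BITS_PER_LONG:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t get_random_u32(void) { return (uint32_t)rand(); }

static uint64_t get_random_u64(void)
{
    return ((uint64_t)get_random_u32() << 32) | get_random_u32();
}

static unsigned long get_random_long(void)
{
#if ULONG_MAX > 0xffffffffUL        /* stand-in for BITS_PER_LONG == 64 */
    return (unsigned long)get_random_u64();
#else
    return (unsigned long)get_random_u32();
#endif
}

int main(void)
{
    printf("%lu\n", get_random_long());
    return 0;
}
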
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 4ae95f7e8597..a23a33153180 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
+/**
+ * hlist_nulls_for_each_entry_safe -
+ * iterate over list of given type safe against removal of list entry
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ */
+#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
+ for (({barrier();}), \
+ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
#endif
#endif
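
Both nulls hunks build on the same encoding: a nulls list ends not in NULL but in an odd pointer that carries a value, typically the bucket index, which is what lets a lockless walker notice it has drifted into a different bucket. A compilable sketch of the marker encoding, mirroring list_nulls.h in simplified form:

#include <stdio.h>

struct nulls_node { struct nulls_node *next; };

/* Mirrors NULLS_MARKER(): value shifted left one, low bit set. */
#define NULLS_MARKER(v) ((struct nulls_node *)(1UL | ((unsigned long)(v) << 1)))

static int is_a_nulls(const struct nulls_node *p)
{
    return (unsigned long)p & 1UL;
}

static unsigned long get_nulls_value(const struct nulls_node *p)
{
    return (unsigned long)p >> 1;
}

int main(void)
{
    struct nulls_node *end = NULLS_MARKER(7);   /* bucket 7's terminator */

    printf("is_a_nulls=%d bucket=%lu\n",
           is_a_nulls(end), get_nulls_value(end));
    return 0;
}
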
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index ad3e5158e586..c9f795e9a2ee 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -65,7 +65,7 @@ struct regulator_state {
int uV; /* suspend voltage */
unsigned int mode; /* suspend regulator operating mode */
int enabled; /* is regulator enabled in this suspend state */
- int disabled; /* is the regulator disbled in this suspend state */
+ int disabled; /* is the regulator disabled in this suspend state */
};
/**
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index be765234c0a2..32354b4b4b2b 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -72,7 +72,7 @@ struct ucounts {
struct hlist_node node;
struct user_namespace *ns;
kuid_t uid;
- atomic_t count;
+ int count;
atomic_t ucount[UCOUNT_COUNTS];
};
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 0468548acebf..48a3483dccb1 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -61,8 +61,7 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
unsigned long from, unsigned long to,
unsigned long len);
-extern void userfaultfd_remove(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
+extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
@@ -72,8 +71,6 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf);
-extern void userfaultfd_exit(struct mm_struct *mm);
-
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
@@ -120,11 +117,11 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
{
}
-static inline void userfaultfd_remove(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
+static inline bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
+ return true;
}
static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
@@ -139,10 +136,6 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
{
}
-static inline void userfaultfd_exit(struct mm_struct *mm)
-{
-}
-
#endif /* CONFIG_USERFAULTFD */
#endif /* _LINUX_USERFAULTFD_K_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 6aa1b6cb5828..a80b7b59cf33 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -79,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
THP_SPLIT_PMD,
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ THP_SPLIT_PUD,
+#endif
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index aacb1282d19a..db076ca7f11d 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -620,30 +620,19 @@ do { \
__ret; \
})
+extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
+extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
-#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
+#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
- int __ret = 0; \
+ int __ret; \
DEFINE_WAIT(__wait); \
if (exclusive) \
__wait.flags |= WQ_FLAG_EXCLUSIVE; \
do { \
- if (likely(list_empty(&__wait.task_list))) \
- __add_wait_queue_tail(&(wq), &__wait); \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (signal_pending(current)) { \
- __ret = -ERESTARTSYS; \
+ __ret = fn(&(wq), &__wait); \
+ if (__ret) \
break; \
- } \
- if (irq) \
- spin_unlock_irq(&(wq).lock); \
- else \
- spin_unlock(&(wq).lock); \
- schedule(); \
- if (irq) \
- spin_lock_irq(&(wq).lock); \
- else \
- spin_lock(&(wq).lock); \
} while (!(condition)); \
__remove_wait_queue(&(wq), &__wait); \
__set_current_state(TASK_RUNNING); \
@@ -676,7 +665,7 @@ do { \
*/
#define wait_event_interruptible_locked(wq, condition) \
((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
/**
* wait_event_interruptible_locked_irq - sleep until a condition gets true
@@ -703,7 +692,7 @@ do { \
*/
#define wait_event_interruptible_locked_irq(wq, condition) \
((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
/**
* wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
@@ -734,7 +723,7 @@ do { \
*/
#define wait_event_interruptible_exclusive_locked(wq, condition) \
((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
/**
* wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
@@ -765,7 +754,7 @@ do { \
*/
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
#define __wait_event_killable(wq, condition) \
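
The wait.h rewrite factors the per-iteration prepare/unlock/sleep/relock body out of __wait_event_interruptible_locked() into do_wait_intr() and do_wait_intr_irq(), so the macro takes a function instead of an irq flag. A rough userspace analogue using pthreads, where pthread_cond_wait() plays the unlock/sleep/relock step the old macro open-coded (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int condition;

typedef int (*wait_fn)(void);

/* Stand-in for do_wait_intr(): one sleep step, called with the lock held. */
static int do_wait(void)
{
    return pthread_cond_wait(&cond, &lock);
}

/* Stand-in for __wait_event_interruptible_locked(): the loop body is now
 * just "call fn until the condition holds or fn reports an error". */
static int wait_event_locked(wait_fn fn)
{
    int ret = 0;

    while (!condition) {
        ret = fn();
        if (ret)
            break;
    }
    return ret;
}

static void *setter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    condition = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_mutex_lock(&lock);
    pthread_create(&t, NULL, setter, NULL);
    printf("wait returned %d\n", wait_event_locked(do_wait));
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}
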
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 458b400373d4..38aac554dbba 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -20,8 +20,17 @@ struct device;
int vsp1_du_init(struct device *dev);
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
- unsigned int height);
+/**
+ * struct vsp1_du_lif_config - VSP LIF configuration
+ * @width: output frame width
+ * @height: output frame height
+ */
+struct vsp1_du_lif_config {
+ unsigned int width;
+ unsigned int height;
+};
+
+int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg);
struct vsp1_du_atomic_config {
u32 pixelformat;
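
vsp1_du_setup_lif() now takes a configuration struct rather than bare width/height arguments, so future LIF parameters can be added without another signature change. A sketch of that API shape with a stand-in function, assuming (as the DU driver does for teardown) that a NULL config means "disable":

#include <stdio.h>

struct du_lif_config {
    unsigned int width;
    unsigned int height;
};

static int setup_lif(const struct du_lif_config *cfg)
{
    if (!cfg) {
        printf("LIF disabled\n");       /* assumed NULL-disables semantic */
        return 0;
    }
    printf("LIF %ux%u\n", cfg->width, cfg->height);
    return 0;
}

int main(void)
{
    struct du_lif_config cfg = { .width = 1920, .height = 1080 };

    setup_lif(&cfg);
    setup_lif(NULL);
    return 0;
}
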
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b7952d55b9c0..f39ae697347f 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -20,7 +20,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags, int is_sendmsg);
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
-int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 826f198374f8..c7a577976bec 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -258,7 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
return (unsigned long)min_t(u64, when, max_when);
}
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
int inet_csk_get_port(struct sock *sk, unsigned short snum);
diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h
index cb2615ccf761..d784f242cf7b 100644
--- a/include/net/irda/timer.h
+++ b/include/net/irda/timer.h
@@ -59,7 +59,7 @@ struct lap_cb;
* Slot timer must never exceed 85 ms, and must always be at least 25 ms,
* suggested to 75-85 msec by IrDA lite. This doesn't work with a lot of
* devices, and other stacks use a lot more, so it's best we do it as well
- * (Note : this is the default value and sysctl overides it - Jean II)
+ * (Note : this is the default value and sysctl overrides it - Jean II)
*/
#define SLOT_TIMEOUT (90*HZ/1000)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a244db5e5ff7..07a0b128625a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -476,7 +476,8 @@ struct sctp_pf {
int (*send_verify) (struct sctp_sock *, union sctp_addr *);
int (*supported_addrs)(const struct sctp_sock *, __be16 *);
struct sock *(*create_accept_sk) (struct sock *sk,
- struct sctp_association *asoc);
+ struct sctp_association *asoc,
+ bool kern);
int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
diff --git a/include/net/sock.h b/include/net/sock.h
index 5e5997654db6..03252d53975d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -236,6 +236,7 @@ struct sock_common {
* @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
* @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
* @sk_lock: synchronizer
+ * @sk_kern_sock: True if sock is using kernel lock classes
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
@@ -430,7 +431,8 @@ struct sock {
#endif
kmemcheck_bitfield_begin(flags);
- unsigned int sk_padding : 2,
+ unsigned int sk_padding : 1,
+ sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
sk_userlocks : 4,
@@ -1015,7 +1017,8 @@ struct proto {
int addr_len);
int (*disconnect)(struct sock *sk, int flags);
- struct sock * (*accept)(struct sock *sk, int flags, int *err);
+ struct sock * (*accept)(struct sock *sk, int flags, int *err,
+ bool kern);
int (*ioctl)(struct sock *sk, int cmd,
unsigned long arg);
@@ -1573,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
-int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
unsigned int sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index b0e275de6dec..583875ea136a 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
struct iscsi_task *task; /* xmit task in progress */
/* xmit */
+ spinlock_t taskqueuelock; /* protects the next three lists */
struct list_head mgmtqueue; /* mgmt (control) xmit queue */
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6f22b39f1b0c..080c7ce9bae8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -472,6 +472,10 @@ static inline int scsi_device_created(struct scsi_device *sdev)
sdev->sdev_state == SDEV_CREATED_BLOCK;
}
+int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
+int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
+
/* accessor functions for the SCSI parameters */
static inline int scsi_device_sync(struct scsi_device *sdev)
{
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index b54b98dc2d4a..1b0f447ce850 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -4,7 +4,12 @@
#include <linux/types.h>
#include <target/target_core_base.h>
-#define TRANSPORT_FLAG_PASSTHROUGH 1
+#define TRANSPORT_FLAG_PASSTHROUGH 0x1
+/*
+ * ALUA commands, state checks and setup operations are handled by the
+ * backend module.
+ */
+#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
struct request_queue;
struct scatterlist;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 37c274e61acc..4b784b6e21c0 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp {
struct list_head tg_pt_gp_lun_list;
struct se_lun *tg_pt_gp_alua_lun;
struct se_node_acl *tg_pt_gp_alua_nacl;
- struct delayed_work tg_pt_gp_transition_work;
+ struct work_struct tg_pt_gp_transition_work;
struct completion *tg_pt_gp_transition_complete;
};
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 14e49c798135..b35533b94277 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 407cb55df6ac..7fb97863c945 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -33,8 +33,8 @@ extern "C" {
#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
struct drm_omap_param {
- uint64_t param; /* in */
- uint64_t value; /* in (set_param), out (get_param) */
+ __u64 param; /* in */
+ __u64 value; /* in (set_param), out (get_param) */
};
#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
@@ -53,18 +53,18 @@ struct drm_omap_param {
#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
union omap_gem_size {
- uint32_t bytes; /* (for non-tiled formats) */
+ __u32 bytes; /* (for non-tiled formats) */
struct {
- uint16_t width;
- uint16_t height;
+ __u16 width;
+ __u16 height;
} tiled; /* (for tiled formats) */
};
struct drm_omap_gem_new {
union omap_gem_size size; /* in */
- uint32_t flags; /* in */
- uint32_t handle; /* out */
- uint32_t __pad;
+ __u32 flags; /* in */
+ __u32 handle; /* out */
+ __u32 __pad;
};
/* mask of operations: */
@@ -74,33 +74,33 @@ enum omap_gem_op {
};
struct drm_omap_gem_cpu_prep {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
+ __u32 handle; /* buffer handle (in) */
+ __u32 op; /* mask of omap_gem_op (in) */
};
struct drm_omap_gem_cpu_fini {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
+ __u32 handle; /* buffer handle (in) */
+ __u32 op; /* mask of omap_gem_op (in) */
/* TODO maybe here we pass down info about what regions are touched
* by sw so we can be clever about cache ops? For now a placeholder,
* set to zero and we just do full buffer flush..
*/
- uint32_t nregions;
- uint32_t __pad;
+ __u32 nregions;
+ __u32 __pad;
};
struct drm_omap_gem_info {
- uint32_t handle; /* buffer handle (in) */
- uint32_t pad;
- uint64_t offset; /* mmap offset (out) */
+ __u32 handle; /* buffer handle (in) */
+ __u32 pad;
+ __u64 offset; /* mmap offset (out) */
/* note: in case of tiled buffers, the user virtual size can be
* different from the physical size (ie. how many pages are needed
* to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
* This size here is the one that should be used if you want to
* mmap() the buffer:
*/
- uint32_t size; /* virtual size for mmap'ing (out) */
- uint32_t __pad;
+ __u32 size; /* virtual size for mmap'ing (out) */
+ __u32 __pad;
};
#define DRM_OMAP_GET_PARAM 0x00
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index d08c63f3dd6f..0c5d5dd61b6a 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -64,7 +64,7 @@ struct packet_diag_mclist {
__u32 pdmc_count;
__u16 pdmc_type;
__u16 pdmc_alen;
- __u8 pdmc_addr[MAX_ADDR_LEN];
+ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */
};
struct packet_diag_ring {
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index c055947c5c98..3b059530dac9 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -18,8 +18,7 @@
* means the userland is reading).
*/
#define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \
- UFFD_FEATURE_EVENT_FORK | \
+#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \
UFFD_FEATURE_EVENT_REMAP | \
UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
@@ -113,7 +112,6 @@ struct uffd_msg {
#define UFFD_EVENT_REMAP 0x14
#define UFFD_EVENT_REMOVE 0x15
#define UFFD_EVENT_UNMAP 0x16
-#define UFFD_EVENT_EXIT 0x17
/* flags for UFFD_EVENT_PAGEFAULT */
#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
@@ -163,7 +161,6 @@ struct uffdio_api {
#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4)
#define UFFD_FEATURE_MISSING_SHMEM (1<<5)
#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
-#define UFFD_FEATURE_EVENT_EXIT (1<<7)
__u64 features;
__u64 ioctls;
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index a0083be5d529..1f6d78f044b6 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -2,6 +2,7 @@
#define __LINUX_SWIOTLB_XEN_H
#include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
extern int xen_swiotlb_init(int verbose, bool early);
@@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
+
+extern int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+
+extern int
+xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs);
#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/init/main.c b/init/main.c
index eae2f15657c6..f9c9d9948203 100644
--- a/init/main.c
+++ b/init/main.c
@@ -882,7 +882,6 @@ static void __init do_basic_setup(void)
do_ctors();
usermodehelper_enable();
do_initcalls();
- random_int_secret_init();
}
static void __init do_pre_smp_initcalls(void)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ea87fb19a94..afe5bab376c9 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,11 +13,12 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
+#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
struct bucket {
- struct hlist_head head;
+ struct hlist_nulls_head head;
raw_spinlock_t lock;
};
@@ -44,9 +45,14 @@ enum extra_elem_state {
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
- struct hlist_node hash_node;
- struct bpf_htab *htab;
- struct pcpu_freelist_node fnode;
+ struct hlist_nulls_node hash_node;
+ struct {
+ void *padding;
+ union {
+ struct bpf_htab *htab;
+ struct pcpu_freelist_node fnode;
+ };
+ };
};
union {
struct rcu_head rcu;
@@ -162,7 +168,8 @@ skip_percpu_elems:
offsetof(struct htab_elem, lru_node),
htab->elem_size, htab->map.max_entries);
else
- pcpu_freelist_populate(&htab->freelist, htab->elems,
+ pcpu_freelist_populate(&htab->freelist,
+ htab->elems + offsetof(struct htab_elem, fnode),
htab->elem_size, htab->map.max_entries);
return 0;
@@ -217,6 +224,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
int err, i;
u64 cost;
+ BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+ offsetof(struct htab_elem, hash_node.pprev));
+ BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+ offsetof(struct htab_elem, hash_node.pprev));
+
if (lru && !capable(CAP_SYS_ADMIN))
/* LRU implementation is much more complicated than other
* maps. Hence, limit to CAP_SYS_ADMIN for now.
@@ -326,7 +338,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_htab;
for (i = 0; i < htab->n_buckets; i++) {
- INIT_HLIST_HEAD(&htab->buckets[i].head);
+ INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
}
@@ -366,20 +378,44 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
return &htab->buckets[hash & (htab->n_buckets - 1)];
}
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
return &__select_bucket(htab, hash)->head;
}
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
+/* this lookup function can only be called with bucket lock taken */
+static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
void *key, u32 key_size)
{
+ struct hlist_nulls_node *n;
+ struct htab_elem *l;
+
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
+ if (l->hash == hash && !memcmp(&l->key, key, key_size))
+ return l;
+
+ return NULL;
+}
+
+/* Can be called without the bucket lock. It will repeat the loop in
+ * the unlikely event that elements moved from one bucket into another
+ * while the linked list is being walked.
+ */
+static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
+ u32 hash, void *key,
+ u32 key_size, u32 n_buckets)
+{
+ struct hlist_nulls_node *n;
struct htab_elem *l;
- hlist_for_each_entry_rcu(l, head, hash_node)
+again:
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l->hash == hash && !memcmp(&l->key, key, key_size))
return l;
+ if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
+ goto again;
+
return NULL;
}
@@ -387,7 +423,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct htab_elem *l;
u32 hash, key_size;
@@ -400,7 +436,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
head = select_bucket(htab, hash);
- l = lookup_elem_raw(head, hash, key, key_size);
+ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
return l;
}
@@ -433,8 +469,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
struct bpf_htab *htab = (struct bpf_htab *)arg;
- struct htab_elem *l, *tgt_l;
- struct hlist_head *head;
+ struct htab_elem *l = NULL, *tgt_l;
+ struct hlist_nulls_head *head;
+ struct hlist_nulls_node *n;
unsigned long flags;
struct bucket *b;
@@ -444,9 +481,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
raw_spin_lock_irqsave(&b->lock, flags);
- hlist_for_each_entry_rcu(l, head, hash_node)
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_del_rcu(&l->hash_node);
break;
}
@@ -459,7 +496,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
int i;
@@ -473,7 +510,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
head = select_bucket(htab, hash);
/* lookup the key */
- l = lookup_elem_raw(head, hash, key, key_size);
+ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
if (!l) {
i = 0;
@@ -481,7 +518,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
}
/* key was found, get next key in the same bucket */
- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
struct htab_elem, hash_node);
if (next_l) {
@@ -500,7 +537,7 @@ find_first_elem:
head = select_bucket(htab, i);
/* pick first element in the bucket */
- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
struct htab_elem, hash_node);
if (next_l) {
/* if it's not empty, just return it */
@@ -582,9 +619,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
int err = 0;
if (prealloc) {
- l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
- if (!l_new)
+ struct pcpu_freelist_node *l;
+
+ l = pcpu_freelist_pop(&htab->freelist);
+ if (!l)
err = -E2BIG;
+ else
+ l_new = container_of(l, struct htab_elem, fnode);
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
atomic_dec(&htab->count);
@@ -661,7 +702,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -700,9 +741,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
*/
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
- hlist_del_rcu(&l_old->hash_node);
+ hlist_nulls_del_rcu(&l_old->hash_node);
free_htab_elem(htab, l_old);
}
ret = 0;
@@ -716,7 +757,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new, *l_old = NULL;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -757,10 +798,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
*/
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
bpf_lru_node_set_ref(&l_new->lru_node);
- hlist_del_rcu(&l_old->hash_node);
+ hlist_nulls_del_rcu(&l_old->hash_node);
}
ret = 0;
@@ -781,7 +822,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -820,7 +861,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
ret = PTR_ERR(l_new);
goto err;
}
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
}
ret = 0;
err:
@@ -834,7 +875,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -882,7 +923,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
} else {
pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
value, onallcpus);
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
l_new = NULL;
}
ret = 0;
@@ -910,7 +951,7 @@ static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct bucket *b;
struct htab_elem *l;
unsigned long flags;
@@ -930,7 +971,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
l = lookup_elem_raw(head, hash, key, key_size);
if (l) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_del_rcu(&l->hash_node);
free_htab_elem(htab, l);
ret = 0;
}
@@ -942,7 +983,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct bucket *b;
struct htab_elem *l;
unsigned long flags;
@@ -962,7 +1003,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
l = lookup_elem_raw(head, hash, key, key_size);
if (l) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_del_rcu(&l->hash_node);
ret = 0;
}
@@ -977,12 +1018,12 @@ static void delete_all_elements(struct bpf_htab *htab)
int i;
for (i = 0; i < htab->n_buckets; i++) {
- struct hlist_head *head = select_bucket(htab, i);
- struct hlist_node *n;
+ struct hlist_nulls_head *head = select_bucket(htab, i);
+ struct hlist_nulls_node *n;
struct htab_elem *l;
- hlist_for_each_entry_safe(l, n, head, hash_node) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+ hlist_nulls_del_rcu(&l->hash_node);
if (l->state != HTAB_EXTRA_ELEM_USED)
htab_elem_free(htab, l);
}
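
For context on the conversion above: lookup_nulls_elem_raw(), called from the lookup and get_next_key paths, relies on the nulls marker terminating each bucket's list encoding that bucket's index. If an element is freed and recycled onto another bucket while a reader walks the chain under RCU, the walk ends on the wrong marker and restarts. A sketch of the helper's shape:

static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	/* Ended on a nulls marker belonging to another bucket: the element
	 * we were on was recycled mid-walk, so restart from the head. */
	if (get_nulls_value(n) != (hash & (n_buckets - 1)))
		goto again;

	return NULL;
}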
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 8bfe0afaee10..b37bd9ab7f57 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -500,9 +500,15 @@ unlock:
raw_spin_unlock(&trie->lock);
}
+static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ return -ENOTSUPP;
+}
+
static const struct bpf_map_ops trie_ops = {
.map_alloc = trie_alloc,
.map_free = trie_free,
+ .map_get_next_key = trie_get_next_key,
.map_lookup_elem = trie_lookup_elem,
.map_update_elem = trie_update_elem,
.map_delete_elem = trie_delete_elem,
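
Without a .map_get_next_key callback, the bpf(2) BPF_MAP_GET_NEXT_KEY command on an LPM trie would have called through a NULL pointer; the stub above makes it fail cleanly. A minimal userspace probe, sketched with the raw syscall (the map_fd argument is assumed to be an already-created LPM trie map):

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Minimal wrapper; attr fields follow the BPF_MAP_GET_NEXT_KEY layout. */
static int bpf_get_next_key(int map_fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)key;
	attr.next_key = (__u64)(unsigned long)next_key;
	return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

On an LPM trie fd this now returns -1 with errno set to the kernel-internal ENOTSUPP value instead of oopsing.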
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 56eba9caa632..1dc22f6b49f5 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1329,7 +1329,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
struct task_struct *task;
int count = 0;
- seq_printf(seq, "css_set %p\n", cset);
+ seq_printf(seq, "css_set %pK\n", cset);
list_for_each_entry(task, &cset->tasks, cg_list) {
if (count++ > MAX_TASKS_SHOWN_PER_CSS)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0125589c7428..48851327a15e 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css)
*
* Returns 0 on success, -errno on failure. On failure, csses which have
* been processed already aren't cleaned up. The caller is responsible for
- * cleaning up with cgroup_apply_control_disble().
+ * cleaning up with cgroup_apply_control_disable().
*/
static int cgroup_apply_control_enable(struct cgroup *cgrp)
{
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index e756dae49300..2237201d66d5 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
/* Only log the first time events_limit is incremented. */
if (atomic64_inc_return(&pids->events_limit) == 1) {
pr_info("cgroup: fork rejected by pids controller in ");
- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+ pr_cont_cgroup_path(css->cgroup);
pr_cont("\n");
}
cgroup_file_notify(&pids->events_file);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f7c063239fa5..37b223e4fc05 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1335,26 +1335,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
struct cpuhp_step *sp;
int ret = 0;
- mutex_lock(&cpuhp_state_mutex);
-
if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
- goto out;
+ return ret;
state = ret;
}
sp = cpuhp_get_step(state);
- if (name && sp->name) {
- ret = -EBUSY;
- goto out;
- }
+ if (name && sp->name)
+ return -EBUSY;
+
sp->startup.single = startup;
sp->teardown.single = teardown;
sp->name = name;
sp->multi_instance = multi_instance;
INIT_HLIST_HEAD(&sp->list);
-out:
- mutex_unlock(&cpuhp_state_mutex);
return ret;
}
@@ -1428,6 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
return -EINVAL;
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi)
goto add_node;
@@ -1447,16 +1443,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
if (ret) {
if (sp->teardown.multi)
cpuhp_rollback_install(cpu, state, node);
- goto err;
+ goto unlock;
}
}
add_node:
ret = 0;
- mutex_lock(&cpuhp_state_mutex);
hlist_add_head(node, &sp->list);
+unlock:
mutex_unlock(&cpuhp_state_mutex);
-
-err:
put_online_cpus();
return ret;
}
@@ -1491,6 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
return -EINVAL;
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
@@ -1524,6 +1519,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
}
}
out:
+ mutex_unlock(&cpuhp_state_mutex);
put_online_cpus();
/*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
@@ -1547,6 +1543,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
return -EINVAL;
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
+
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
@@ -1563,7 +1561,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
}
remove:
- mutex_lock(&cpuhp_state_mutex);
hlist_del(node);
mutex_unlock(&cpuhp_state_mutex);
put_online_cpus();
@@ -1571,6 +1568,7 @@ remove:
return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
+
/**
* __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
* @state: The state to remove
@@ -1589,6 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) {
WARN(!hlist_empty(&sp->list),
"Error: Removing state %d which has instances left.\n",
@@ -1613,6 +1612,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
}
remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+ mutex_unlock(&cpuhp_state_mutex);
put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
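
The net effect of the kernel/cpu.c hunks above is a simpler lock order: each entry point now takes cpuhp_state_mutex once for the whole operation, nested inside get_online_cpus(), rather than cpuhp_store_callbacks() taking it around just the table update. Sketched:

	get_online_cpus();
	mutex_lock(&cpuhp_state_mutex);
	/* reserve a dynamic state, store the callbacks, and walk the online
	 * CPUs invoking the startup callback, all in one critical section */
	mutex_unlock(&cpuhp_state_mutex);
	put_online_cpus();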
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6f41548f2e32..ff01cba86f43 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -998,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event,
*/
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
- * function must be called with interrupts disbled
+ * function must be called with interrupts disabled
*/
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
@@ -4256,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event)
raw_spin_lock_irq(&ctx->lock);
/*
- * Mark this even as STATE_DEAD, there is no external reference to it
+ * Mark this event as STATE_DEAD, there is no external reference to it
* anymore.
*
* Anybody acquiring event->child_mutex after the below loop _must_
@@ -10417,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task)
continue;
mutex_lock(&ctx->mutex);
-again:
- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
- group_entry)
- perf_free_event(event, ctx);
+ raw_spin_lock_irq(&ctx->lock);
+ /*
+ * Destroy the task <-> ctx relation and mark the context dead.
+ *
+ * This is important because even though the task hasn't been
+ * exposed yet, the context has been (through child_list).
+ */
+ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+ WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+ put_task_struct(task); /* cannot be last */
+ raw_spin_unlock_irq(&ctx->lock);
- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
- group_entry)
+ list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
perf_free_event(event, ctx);
- if (!list_empty(&ctx->pinned_groups) ||
- !list_empty(&ctx->flexible_groups))
- goto again;
-
mutex_unlock(&ctx->mutex);
-
put_ctx(ctx);
}
}
@@ -10469,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
}
/*
- * inherit a event from parent task to child task:
+ * Inherit an event from parent task to child task.
+ *
+ * Returns:
+ * - valid pointer on success
+ * - NULL for orphaned events
+ * - IS_ERR() on error
*/
static struct perf_event *
inherit_event(struct perf_event *parent_event,
@@ -10563,6 +10569,16 @@ inherit_event(struct perf_event *parent_event,
return child_event;
}
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ * - 0 on success
+ * - <0 on error
+ */
static int inherit_group(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
@@ -10577,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event,
child, NULL, child_ctx);
if (IS_ERR(leader))
return PTR_ERR(leader);
+ /*
+ * @leader can be NULL here because of is_orphaned_event(). In this
+ * case inherit_event() will create individual events, similar to what
+ * perf_group_detach() would do anyway.
+ */
list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
child_ctr = inherit_event(sub, parent, parent_ctx,
child, leader, child_ctx);
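
Per the Returns: comment added to inherit_event() above, callers must distinguish three outcomes, which is exactly what inherit_group() does here. Roughly:

	child_event = inherit_event(parent_event, parent, parent_ctx,
				    child, group_leader, child_ctx);
	if (IS_ERR(child_event))
		return PTR_ERR(child_event);	/* hard failure, abort */
	if (!child_event)
		return 0;			/* orphaned parent: skipped */
	/* valid pointer: continue with the siblings */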
@@ -10586,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event,
return 0;
}
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ * - 0 on success
+ * - <0 on error
+ */
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *parent_ctx,
@@ -10608,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
* First allocate and initialize a context for the
* child.
*/
-
child_ctx = alloc_perf_context(parent_ctx->pmu, child);
if (!child_ctx)
return -ENOMEM;
@@ -10670,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}
/*
@@ -10686,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10714,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
diff --git a/kernel/exit.c b/kernel/exit.c
index e126ebf2400c..516acdb0e0ec 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -554,7 +554,6 @@ static void exit_mm(void)
enter_lazy_tlb(mm, current);
task_unlock(current);
mm_update_next_owner(mm);
- userfaultfd_exit(mm);
mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
diff --git a/kernel/futex.c b/kernel/futex.c
index 229a744b1781..45858ec73941 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2815,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
@@ -2899,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+ rt_mutex_unlock(&q.pi_state->pi_mutex);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
@@ -2907,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
spin_unlock(q.lock_ptr);
}
} else {
+ struct rt_mutex *pi_mutex;
+
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2930,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (res)
ret = (res < 0) ? res : 0;
+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle
+ * the fault, unlock the rt_mutex and return the fault to
+ * userspace.
+ */
+ if (ret && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle the
- * fault, unlock the rt_mutex and return the fault to userspace.
- */
- if (ret == -EFAULT) {
- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
- rt_mutex_unlock(pi_mutex);
- } else if (ret == -EINTR) {
+ if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index b56a558e406d..b118735fea9d 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -614,13 +614,13 @@ static int kexec_calculate_store_digests(struct kimage *image)
ret = crypto_shash_final(desc, digest);
if (ret)
goto out_free_digest;
- ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
- sha_regions, sha_region_sz, 0);
+ ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
+ sha_regions, sha_region_sz, 0);
if (ret)
goto out_free_digest;
- ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
- digest, SHA256_DIGEST_SIZE, 0);
+ ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
+ digest, SHA256_DIGEST_SIZE, 0);
if (ret)
goto out_free_digest;
}
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 4cef7e4706b0..799a8a452187 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -15,11 +15,7 @@ int kimage_is_destination_range(struct kimage *image,
extern struct mutex kexec_mutex;
#ifdef CONFIG_KEXEC_FILE
-struct kexec_sha_region {
- unsigned long start;
- unsigned long len;
-};
-
+#include <linux/purgatory.h>
void kimage_file_post_load_cleanup(struct kimage *image);
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 508cbf31d43e..34e97d970761 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3262,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (depth) {
hlock = curr->held_locks + depth - 1;
if (hlock->class_idx == class_idx && nest_lock) {
- if (hlock->references)
+ if (hlock->references) {
+ /*
+ * Check: unsigned int references:12, overflow.
+ */
+ if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+ return 0;
+
hlock->references++;
- else
+ } else {
hlock->references = 2;
+ }
return 1;
}
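
held_lock::references is a 12-bit bitfield, so without the check added above the 4096th nested reference would silently wrap it to zero and corrupt the unlock accounting. A userspace illustration of the wraparound (the struct is a stand-in, not the kernel's):

#include <stdio.h>

struct held_lock_demo {
	unsigned int references : 12;	/* mirrors the kernel bitfield width */
};

int main(void)
{
	struct held_lock_demo h = { .references = (1 << 12) - 1 };

	printf("before increment: %u\n", (unsigned int)h.references); /* 4095 */
	h.references++;
	printf("after increment:  %u\n", (unsigned int)h.references); /* 0: wrapped */
	return 0;
}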
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 7bc24d477805..c65f7989f850 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -213,10 +213,9 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
*/
if (sem->count == 0)
break;
- if (signal_pending_state(state, current)) {
- ret = -EINTR;
- goto out;
- }
+ if (signal_pending_state(state, current))
+ goto out_nolock;
+
set_current_state(state);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
schedule();
@@ -224,12 +223,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
}
/* got the lock */
sem->count = -1;
-out:
list_del(&waiter.list);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
+
+out_nolock:
+ list_del(&waiter.list);
+ if (!list_empty(&sem->wait_list))
+ __rwsem_do_wake(sem, 1);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ return -EINTR;
}
void __sched __down_write(struct rw_semaphore *sem)
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index da6c9a34f62f..6b7abb334ca6 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work)
if (mtx->flags & TEST_MTX_TRY) {
while (!ww_mutex_trylock(&mtx->mutex))
- cpu_relax();
+ cond_resched();
} else {
ww_mutex_lock(&mtx->mutex, NULL);
}
@@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags)
ret = -EINVAL;
break;
}
- cpu_relax();
+ cond_resched();
} while (time_before(jiffies, timeout));
} else {
ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
@@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void)
if (ret)
return ret;
- ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
+ ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
if (ret)
return ret;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 06123234f118..07e85e5229da 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(resource_size(res), SECTION_SIZE);
- lock_device_hotplug();
mem_hotplug_begin();
arch_remove_memory(align_start, align_size);
mem_hotplug_done();
- unlock_device_hotplug();
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
pgmap_radix_release(res);
@@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
if (error)
goto err_pfn_remap;
- lock_device_hotplug();
mem_hotplug_begin();
error = arch_add_memory(nid, align_start, align_size, true);
mem_hotplug_done();
- unlock_device_hotplug();
if (error)
goto err_add_memory;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 956383844116..3b31fc05a0f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
struct task_struct *p;
/*
- * Optimization: we know that if all tasks are in
- * the fair class we can call that function directly:
+ * Optimization: we know that if all tasks are in the fair class we can
+ * call that function directly, but only if the @prev task wasn't of a
+ * higher scheduling class, because otherwise those classes lose the
+ * opportunity to pull in more work from other CPUs.
*/
- if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+ if (likely((prev->sched_class == &idle_sched_class ||
+ prev->sched_class == &fair_sched_class) &&
+ rq->nr_running == rq->cfs.h_nr_running)) {
+
p = fair_sched_class.pick_next_task(rq, prev, rf);
if (unlikely(p == RETRY_TASK))
goto again;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 8f8de3d4d6b7..cd7cd489f739 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,7 @@ struct sugov_policy {
u64 last_freq_update_time;
s64 freq_update_delay_ns;
unsigned int next_freq;
+ unsigned int cached_raw_freq;
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
@@ -52,7 +53,6 @@ struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned int cached_raw_freq;
unsigned long iowait_boost;
unsigned long iowait_boost_max;
u64 last_update;
@@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
* @util: Current CPU utilization.
* @max: CPU capacity.
*
@@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
* next_freq (as calculated above) is returned, subject to policy min/max and
* cpufreq driver limitations.
*/
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
- unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ unsigned long util, unsigned long max)
{
- struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
freq = (freq + (freq >> 2)) * util / max;
- if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
return sg_policy->next_freq;
- sg_cpu->cached_raw_freq = freq;
+ sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
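
With frequency-invariant scaling the computation above resolves to next_freq = 1.25 * max_freq * util / max; the 25% headroom keeps the CPU from sitting exactly at the measured utilization. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned long max_freq = 2000000;	/* kHz, hypothetical policy max */
	unsigned long util = 512, max = 1024;	/* 50% utilization */
	unsigned long freq = max_freq;

	freq = (freq + (freq >> 2)) * util / max;	/* same math as above */
	printf("raw target: %lu kHz\n", freq);		/* 1250000 */
	return 0;
}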
@@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
} else {
sugov_get_util(&util, &max);
sugov_iowait_boost(sg_cpu, &util, &max);
- next_f = get_next_freq(sg_cpu, util, max);
+ next_f = get_next_freq(sg_policy, util, max);
}
sugov_update_commit(sg_policy, time, next_f);
}
@@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
sugov_iowait_boost(j_sg_cpu, &util, &max);
}
- return get_next_freq(sg_cpu, util, max);
+ return get_next_freq(sg_policy, util, max);
}
static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -580,6 +579,7 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->next_freq = UINT_MAX;
sg_policy->work_in_progress = false;
sg_policy->need_freq_update = false;
+ sg_policy->cached_raw_freq = 0;
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -590,7 +590,6 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_cpu->max = 0;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->last_update = 0;
- sg_cpu->cached_raw_freq = 0;
sg_cpu->iowait_boost = 0;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 99b2c33a9fbc..a2ce59015642 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
*
* This function returns true if:
*
- * runtime / (deadline - t) > dl_runtime / dl_period ,
+ * runtime / (deadline - t) > dl_runtime / dl_deadline ,
*
* IOW we can't recycle current parameters.
*
- * Notice that the bandwidth check is done against the period. For
+ * Notice that the bandwidth check is done against the deadline. For
* a task with deadline equal to period, this is the same as using
- * dl_deadline instead of dl_period in the equation above.
+ * dl_period instead of dl_deadline in the equation above.
*/
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se, u64 t)
@@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
- left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+ left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
(pi_se->dl_runtime >> DL_SCALE);
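
The left/right comparison above is the inequality runtime / (deadline - t) > dl_runtime / dl_deadline cross-multiplied, with each factor pre-shifted by DL_SCALE so the 64-bit products cannot overflow. A standalone check with hypothetical nanosecond values:

#include <stdint.h>
#include <stdio.h>

#define DL_SCALE 10	/* matches the kernel's scaling shift */

int main(void)
{
	uint64_t dl_runtime = 5000000, dl_deadline = 10000000; /* 5ms / 10ms */
	uint64_t runtime = 2000000, deadline_minus_t = 3000000; /* leftovers */
	uint64_t left, right;

	/* runtime / (deadline - t) > dl_runtime / dl_deadline, no division */
	left = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
	right = (deadline_minus_t >> DL_SCALE) * (dl_runtime >> DL_SCALE);
	printf("overflow: %s\n", left > right ? "yes" : "no");	/* yes */
	return 0;
}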
@@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
}
}
+static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
+{
+ return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
+}
+
/*
* If the entity depleted all its runtime, and if we want it to sleep
* while waiting for some new execution time to become available, we
- * set the bandwidth enforcement timer to the replenishment instant
+ * set the bandwidth replenishment timer to the replenishment instant
* and try to activate it.
*
* Notice that it is important for the caller to know if the timer
@@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p)
* that it is actually coming from rq->clock and not from
* hrtimer's time base reading.
*/
- act = ns_to_ktime(dl_se->deadline);
+ act = ns_to_ktime(dl_next_period(dl_se));
now = hrtimer_cb_get_time(timer);
delta = ktime_to_ns(now) - rq_clock(rq);
act = ktime_add_ns(act, delta);
@@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
lockdep_unpin_lock(&rq->lock, rf.cookie);
rq = dl_task_offline_migration(rq, p);
rf.cookie = lockdep_pin_lock(&rq->lock);
+ update_rq_clock(rq);
/*
* Now that the task has been migrated to the new RQ and we
@@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
timer->function = dl_task_timer;
}
+/*
+ * During the activation, CBS checks if it can reuse the current task's
+ * runtime and period. If the deadline of the task is in the past, CBS
+ * cannot use the runtime, and so it replenishes the task. This rule
+ * works fine for implicit deadline tasks (deadline == period), and the
+ * CBS was designed for implicit deadline tasks. However, a task with a
+ * constrained deadline (deadline < period) might be awakened after its
+ * deadline, but before the next period. In this case, replenishing the
+ * task would let it run with bandwidth runtime / deadline; since
+ * deadline < period, that exceeds the reserved bandwidth of
+ * runtime / period. In a very loaded system, this can cause a domino
+ * effect, making other tasks miss their deadlines.
+ *
+ * To avoid this problem, in the activation of a constrained deadline
+ * task after the deadline but before the next period, throttle the
+ * task and set the replenishing timer to the beginning of the next period,
+ * unless it is boosted.
+ */
+static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
+{
+ struct task_struct *p = dl_task_of(dl_se);
+ struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
+
+ if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+ dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
+ if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+ return;
+ dl_se->dl_throttled = 1;
+ }
+}
+
static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
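
A numeric illustration of the comment above, using hypothetical parameters runtime = 5 ms, deadline = 10 ms, period = 100 ms: replenishing on a wakeup between the deadline and the end of the period would grant 5 ms out of every 10 ms to a task that reserved 5 ms out of every 100 ms:

#include <stdio.h>

int main(void)
{
	double runtime = 5.0, deadline = 10.0, period = 100.0;	/* ms */

	printf("reserved bandwidth:       %5.2f%%\n",
	       100.0 * runtime / period);	/*  5.00% */
	printf("bandwidth if replenished: %5.2f%%\n",
	       100.0 * runtime / deadline);	/* 50.00% */
	return 0;
}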
@@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
__dequeue_dl_entity(dl_se);
}
+static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_deadline < dl_se->dl_period;
+}
+
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -948,6 +990,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
}
/*
+ * Check if a constrained deadline task was activated
+ * after the deadline but before the next period.
+ * If that is the case, the task will be throttled and
+ * the replenishment timer will be set to the next period.
+ */
+ if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+ dl_check_constrained_dl(&p->dl);
+
+ /*
* If p is throttled, we do nothing. In fact, if it exhausted
* its budget it needs a replenishment and, since it now is on
* its rq, the bandwidth timer callback (which clearly has not
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e88b35ac157..dea138964b91 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
* Due to large variance we need a large fuzz factor; hackbench in
* particularly is sensitive here.
*/
- if ((avg_idle / 512) < avg_cost)
+ if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
return -1;
time = local_clock();
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa46c2f..1b3c8189b286 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
*/
SCHED_FEAT(TTWU_QUEUE, true)
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
#ifdef HAVE_RT_PUSH_IPI
/*
* In order to avoid a thundering herd attack of CPUs that are
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 7296b7308eca..f15fb2bdbc0d 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -169,7 +169,7 @@ static inline int calc_load_write_idx(void)
* If the folding window started, make sure we start writing in the
* next idle-delta.
*/
- if (!time_before(jiffies, calc_load_update))
+ if (!time_before(jiffies, READ_ONCE(calc_load_update)))
idx++;
return idx & 1;
@@ -202,8 +202,9 @@ void calc_load_exit_idle(void)
struct rq *this_rq = this_rq();
/*
- * If we're still before the sample window, we're done.
+ * If we're still before the pending sample window, we're done.
*/
+ this_rq->calc_load_update = READ_ONCE(calc_load_update);
if (time_before(jiffies, this_rq->calc_load_update))
return;
@@ -212,7 +213,6 @@ void calc_load_exit_idle(void)
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
- this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
@@ -308,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp,
*/
static void calc_global_nohz(void)
{
+ unsigned long sample_window;
long delta, active, n;
- if (!time_before(jiffies, calc_load_update + 10)) {
+ sample_window = READ_ONCE(calc_load_update);
+ if (!time_before(jiffies, sample_window + 10)) {
/*
* Catch-up, fold however many we are behind still
*/
- delta = jiffies - calc_load_update - 10;
+ delta = jiffies - sample_window - 10;
n = 1 + (delta / LOAD_FREQ);
active = atomic_long_read(&calc_load_tasks);
@@ -324,7 +326,7 @@ static void calc_global_nohz(void)
avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
- calc_load_update += n * LOAD_FREQ;
+ WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
}
/*
@@ -352,9 +354,11 @@ static inline void calc_global_nohz(void) { }
*/
void calc_global_load(unsigned long ticks)
{
+ unsigned long sample_window;
long active, delta;
- if (time_before(jiffies, calc_load_update + 10))
+ sample_window = READ_ONCE(calc_load_update);
+ if (time_before(jiffies, sample_window + 10))
return;
/*
@@ -371,7 +375,7 @@ void calc_global_load(unsigned long ticks)
avenrun[1] = calc_load(avenrun[1], EXP_5, active);
avenrun[2] = calc_load(avenrun[2], EXP_15, active);
- calc_load_update += LOAD_FREQ;
+ WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
/*
* In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
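
Every loadavg hunk above follows the same pattern: snapshot calc_load_update once with READ_ONCE(), base all comparisons on the local copy, and publish the new window with WRITE_ONCE() relative to that same snapshot, so a concurrent NO_HZ update can no longer make the checks and the final write disagree. Distilled:

	unsigned long sample_window = READ_ONCE(calc_load_update);

	if (time_before(jiffies, sample_window + 10))
		return;
	/* ... fold the pending samples into avenrun[] ... */
	WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);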
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4d2ea6f25568..b8c84c6dee64 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
}
EXPORT_SYMBOL(prepare_to_wait_event);
+/*
+ * Note! These two wait functions are entered with the
+ * wait-queue lock held (and interrupts off in the _irq
+ * case), so there is no race with testing the wakeup
+ * condition in the caller before they add the wait
+ * entry to the wait queue.
+ */
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+ if (likely(list_empty(&wait->task_list)))
+ __add_wait_queue_tail(wq, wait);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ spin_unlock(&wq->lock);
+ schedule();
+ spin_lock(&wq->lock);
+ return 0;
+}
+EXPORT_SYMBOL(do_wait_intr);
+
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+ if (likely(list_empty(&wait->task_list)))
+ __add_wait_queue_tail(wq, wait);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ spin_unlock_irq(&wq->lock);
+ schedule();
+ spin_lock_irq(&wq->lock);
+ return 0;
+}
+EXPORT_SYMBOL(do_wait_intr_irq);
+
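
One plausible caller shape for the new helpers (a sketch, not taken from an in-tree user; it assumes a wait_queue_head_t wq and a predicate checked under wq.lock): the lock is held around the whole loop, and because do_wait_intr() can return with the entry still queued and the task still in TASK_INTERRUPTIBLE, cleanup unhooks the entry with the lock held and resets the task state:

	DEFINE_WAIT(wait);
	int err = 0;

	spin_lock(&wq.lock);
	while (!condition_locked()) {	/* hypothetical predicate */
		err = do_wait_intr(&wq, &wait);
		if (err)		/* -ERESTARTSYS: signal pending */
			break;
	}
	if (!list_empty(&wait.task_list))
		__remove_wait_queue(&wq, &wait);	/* lock already held */
	__set_current_state(TASK_RUNNING);
	spin_unlock(&wq.lock);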
/**
* finish_wait - clean up after waiting in a queue
* @q: waitqueue waited on
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7906b3f0c41a..497719127bf9 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second)
shift_hz += cycles_per_tick/2;
do_div(shift_hz, cycles_per_tick);
/* Calculate nsec_per_tick using shift_hz */
- nsec_per_tick = (u64)TICK_NSEC << 8;
+ nsec_per_tick = (u64)NSEC_PER_SEC << 8;
nsec_per_tick += (u32)shift_hz/2;
do_div(nsec_per_tick, (u32)shift_hz);
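
shift_hz is the refined tick rate scaled up by 8 bits, so nanoseconds per tick must come from NSEC_PER_SEC scaled by the same 8 bits; the old TICK_NSEC base is already a per-tick quantity, which left the result low by roughly a factor of HZ. Checking the arithmetic for a hypothetical exact 1000 Hz clock:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t shift_hz = 1000ULL << 8;	/* refined HZ, scaled by 8 bits */
	uint64_t nsec_per_tick = NSEC_PER_SEC << 8;

	nsec_per_tick += (uint32_t)shift_hz / 2;	/* round to nearest */
	nsec_per_tick /= (uint32_t)shift_hz;
	printf("nsec_per_tick = %llu\n",	/* 1000000, as expected */
	       (unsigned long long)nsec_per_tick);
	return 0;
}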
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d5038005eb5d..d4a06e714645 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE
If unsure, say N.
-config KPROBE_EVENT
+config KPROBE_EVENTS
depends on KPROBES
depends on HAVE_REGS_AND_STACK_ACCESS_API
bool "Enable kprobes-based dynamic events"
@@ -447,7 +447,7 @@ config KPROBE_EVENT
This option is also required by perf-probe subcommand of perf tools.
If you want to use perf tools, this option is strongly recommended.
-config UPROBE_EVENT
+config UPROBE_EVENTS
bool "Enable uprobes-based dynamic events"
depends on ARCH_SUPPORTS_UPROBES
depends on MMU
@@ -466,7 +466,7 @@ config UPROBE_EVENT
config BPF_EVENTS
depends on BPF_SYSCALL
- depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
+ depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
bool
default y
help
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index e57980845549..90f2701d92a7 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
-obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
+obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
obj-$(CONFIG_TRACEPOINTS) += power-traces.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
@@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y)
obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
-obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d1597c9ee30..b9691ee8f6c1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4416,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str)
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);
+static int __init set_graph_max_depth_function(char *str)
+{
+ if (!str)
+ return 0;
+ fgraph_max_depth = simple_strtoul(str, NULL, 0);
+ return 1;
+}
+__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
+
static void __init set_ftrace_early_graph(char *buf, int enable)
{
int ret;
char *func;
struct ftrace_hash *hash;
- if (enable)
- hash = ftrace_graph_hash;
- else
- hash = ftrace_graph_notrace_hash;
+ hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+ if (WARN_ON(!hash))
+ return;
while (buf) {
func = strsep(&buf, ",");
@@ -4435,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
printk(KERN_DEBUG "ftrace: function %s not "
"traceable\n", func);
}
+
+ if (enable)
+ ftrace_graph_hash = hash;
+ else
+ ftrace_graph_notrace_hash = hash;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -5488,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
* Normally the mcount trampoline will call the ops->func, but there
* are times that it should not. For example, if the ops does not
* have its own recursion protection, then it should call the
- * ftrace_ops_recurs_func() instead.
+ * ftrace_ops_assist_func() instead.
*
* Returns the function that the trampoline should call for @ops.
*/
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 707445ceb7ef..f35109514a01 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4341,22 +4341,22 @@ static const char readme_msg[] =
"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
" kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
" uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
-#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
"\t accepts: event-definitions (one definition per line)\n"
"\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
"\t -:[<group>/]<event>\n"
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
"\t place: <path>:<offset>\n"
#endif
"\t args: <name>=fetcharg[:type]\n"
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 0c0ae54d44c6..903273c93e61 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \
#define FETCH_TYPE_STRING 0
#define FETCH_TYPE_STRSIZE 1
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
struct symbol_cache;
unsigned long update_symbol_cache(struct symbol_cache *sc);
void free_symbol_cache(struct symbol_cache *sc);
@@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset)
{
return NULL;
}
-#endif /* CONFIG_KPROBE_EVENT */
+#endif /* CONFIG_KPROBE_EVENTS */
struct probe_arg {
struct fetch_param fetch;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 1d68b5b7ad41..5fb1f2c87e6b 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -65,7 +65,7 @@ void stack_trace_print(void)
}
/*
- * When arch-specific code overides this function, the following
+ * When arch-specific code overrides this function, the following
* data should be filled up, assuming stack_trace_max_lock is held to
* prevent concurrent updates.
* stack_trace_index[]
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 62630a40ab3a..b4eeee03934f 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
new->ns = ns;
new->uid = uid;
- atomic_set(&new->count, 0);
+ new->count = 0;
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
ucounts = new;
}
}
- if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+ if (ucounts->count == INT_MAX)
ucounts = NULL;
+ else
+ ucounts->count += 1;
spin_unlock_irq(&ucounts_lock);
return ucounts;
}
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
{
unsigned long flags;
- if (atomic_dec_and_test(&ucounts->count)) {
- spin_lock_irqsave(&ucounts_lock, flags);
+ spin_lock_irqsave(&ucounts_lock, flags);
+ ucounts->count -= 1;
+ if (!ucounts->count)
hlist_del_init(&ucounts->node);
- spin_unlock_irqrestore(&ucounts_lock, flags);
+ else
+ ucounts = NULL;
+ spin_unlock_irqrestore(&ucounts_lock, flags);
- kfree(ucounts);
- }
+ kfree(ucounts);
}
static inline bool atomic_inc_below(atomic_t *v, int u)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 072cbc9b175d..c0168b7da1ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1507,6 +1507,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
+ WARN_ON_ONCE(!wq);
WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
timer->data != (unsigned long)dwork);
WARN_ON_ONCE(timer_pending(timer));
diff --git a/lib/ioremap.c b/lib/ioremap.c
index a3e14ce92a56..4bb30206b942 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -14,6 +14,7 @@
#include <asm/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;
@@ -35,6 +36,11 @@ void __init ioremap_huge_init(void)
}
}
+static inline int ioremap_p4d_enabled(void)
+{
+ return ioremap_p4d_capable;
+}
+
static inline int ioremap_pud_enabled(void)
{
return ioremap_pud_capable;
@@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void)
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
return 0;
}
-static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
phys_addr -= addr;
- pud = pud_alloc(&init_mm, pgd, addr);
+ pud = pud_alloc(&init_mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
@@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
return 0;
}
+static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ phys_addr -= addr;
+ p4d = p4d_alloc(&init_mm, pgd, addr);
+ if (!p4d)
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (ioremap_p4d_enabled() &&
+ ((next - addr) == P4D_SIZE) &&
+ IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
+ if (p4d_set_huge(p4d, phys_addr + addr, prot))
+ continue;
+ }
+
+ if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
+ return -ENOMEM;
+ } while (p4d++, addr = next, addr != end);
+ return 0;
+}
+
int ioremap_page_range(unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
@@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr,
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
+ err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
if (err)
break;
} while (pgd++, addr = next, addr != end);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5ed506d648c4..691a9ad48497 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
if (!bitmap)
return 0;
- bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
- kfree(bitmap);
+ if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
+ kfree(bitmap);
}
return 1;
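
The old code freed whatever this_cpu_cmpxchg() returned: NULL on success (harmless), but on failure the old, still-installed bitmap, leaving a dangling per-cpu pointer. The fix frees the new, unused allocation instead. A userspace analog of the corrected pattern, with one shared slot standing in for the per-cpu variable:

#include <stdlib.h>

static void *slot;	/* stands in for the per-cpu ida_bitmap pointer */

static void pre_get(void)
{
	void *fresh = malloc(64);
	void *expected = NULL;

	/* Install only if the slot is empty; on failure discard *our* copy,
	 * never the one already installed. */
	if (!__atomic_compare_exchange_n(&slot, &expected, fresh, 0,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		free(fresh);
}

int main(void)
{
	pre_get();
	pre_get();	/* second call loses the race, frees its own buffer */
	free(slot);
	return 0;
}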
diff --git a/lib/refcount.c b/lib/refcount.c
index 1d33366189d1..aa09ad3c30b0 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -58,7 +58,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
val = old;
}
- WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+ WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
return true;
}
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(refcount_add_not_zero);
void refcount_add(unsigned int i, refcount_t *r)
{
- WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_add);
@@ -97,7 +97,7 @@ bool refcount_inc_not_zero(refcount_t *r)
val = old;
}
- WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+ WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
return true;
}
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
*/
void refcount_inc(refcount_t *r)
{
- WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_inc);
@@ -125,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
new = val - i;
if (new > val) {
- WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
return false;
}
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(refcount_dec_and_test);
void refcount_dec(refcount_t *r)
{
- WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+ WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL_GPL(refcount_dec);
@@ -204,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r)
new = val - 1;
if (new > val) {
- WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
return true;
}
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6d861d090e9f..c6f2a37028c2 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -683,33 +683,26 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
struct radix_tree_iter iter;
- struct rb_node *rbn;
void **slot;
WARN_ON(test_bit(WB_registered, &bdi->wb.state));
spin_lock_irq(&cgwb_lock);
-
radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
cgwb_kill(*slot);
-
- while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
- struct bdi_writeback_congested *congested =
- rb_entry(rbn, struct bdi_writeback_congested, rb_node);
-
- rb_erase(rbn, &bdi->cgwb_congested_tree);
- congested->bdi = NULL; /* mark @congested unlinked */
- }
-
spin_unlock_irq(&cgwb_lock);
/*
- * All cgwb's and their congested states must be shutdown and
- * released before returning. Drain the usage counter to wait for
- * all cgwb's and cgwb_congested's ever created on @bdi.
+ * All cgwb's must be shut down and released before returning. Drain
+ * the usage counter to wait for all cgwb's ever created on @bdi.
*/
atomic_dec(&bdi->usage_cnt);
wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
+ /*
+ * Grab back our reference so that we hold it when @bdi gets
+ * re-registered.
+ */
+ atomic_inc(&bdi->usage_cnt);
}
/**
@@ -749,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg)
spin_unlock_irq(&cgwb_lock);
}
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
+{
+ struct rb_node *rbn;
+
+ spin_lock_irq(&cgwb_lock);
+ while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+ struct bdi_writeback_congested *congested =
+ rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+ rb_erase(rbn, &bdi->cgwb_congested_tree);
+ congested->bdi = NULL; /* mark @congested unlinked */
+ }
+ spin_unlock_irq(&cgwb_lock);
+}
+
#else /* CONFIG_CGROUP_WRITEBACK */
static int cgwb_bdi_init(struct backing_dev_info *bdi)
@@ -769,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
return 0;
}
-static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
+static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
+
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
wb_congested_put(bdi->wb_congested);
}
@@ -857,6 +867,8 @@ int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
MINOR(owner->devt));
if (rc)
return rc;
+ /* Leaking owner reference... */
+ WARN_ON(bdi->owner);
bdi->owner = owner;
get_device(owner);
return 0;
@@ -898,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi)
{
WARN_ON_ONCE(bdi->dev);
wb_exit(&bdi->wb);
+ cgwb_bdi_exit(bdi);
}
static void release_bdi(struct kref *ref)
diff --git a/mm/gup.c b/mm/gup.c
index 9c047e951aa3..04aa405350dc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -226,6 +226,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned int *page_mask)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
spinlock_t *ptl;
@@ -243,8 +244,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
return no_page_table(vma, flags);
-
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d))
+ return no_page_table(vma, flags);
+ BUILD_BUG_ON(p4d_huge(*p4d));
+ if (unlikely(p4d_bad(*p4d)))
+ return no_page_table(vma, flags);
+ pud = pud_offset(p4d, address);
if (pud_none(*pud))
return no_page_table(vma, flags);
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
@@ -325,6 +331,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
struct page **page)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -338,7 +345,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
else
pgd = pgd_offset_gate(mm, address);
BUG_ON(pgd_none(*pgd));
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ BUG_ON(p4d_none(*p4d));
+ pud = pud_offset(p4d, address);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
@@ -1400,13 +1409,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
return 1;
}
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pud_t *pudp;
- pudp = pud_offset(&pgd, addr);
+ pudp = pud_offset(&p4d, addr);
do {
pud_t pud = READ_ONCE(*pudp);
@@ -1428,6 +1437,31 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1;
}
+static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ p4d_t *p4dp;
+
+ p4dp = p4d_offset(&pgd, addr);
+ do {
+ p4d_t p4d = READ_ONCE(*p4dp);
+
+ next = p4d_addr_end(addr, end);
+ if (p4d_none(p4d))
+ return 0;
+ BUILD_BUG_ON(p4d_huge(p4d));
+ if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
+ if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
+ P4D_SHIFT, next, write, pages, nr))
+ return 0;
+ } else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
+ return 0;
+ } while (p4dp++, addr = next, addr != end);
+
+ return 1;
+}
+
/*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP. It will only return non-negative values.
@@ -1478,7 +1512,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
PGDIR_SHIFT, next, write, pages, &nr))
break;
- } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
break;
} while (pgdp++, addr = next, addr != end);
local_irq_restore(flags);
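
With this change the fast-GUP walk covers all five levels; on architectures with fewer levels the p4d layer folds into the pgd at compile time, so the extra hop is free. The full descent now reads:

	pgd = pgd_offset(mm, address);
	p4d = p4d_offset(pgd, address);	/* new level; a no-op fold on <5-level */
	pud = pud_offset(p4d, address);	/* now takes a p4d_t *, not a pgd_t * */
	pmd = pmd_offset(pud, address);
	pte = pte_offset_map(pmd, address);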
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d36b2af4d1bf..1ebc93e179f3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1828,7 +1828,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
- count_vm_event(THP_SPLIT_PMD);
+ count_vm_event(THP_SPLIT_PUD);
pudp_huge_clear_flush_notify(vma, haddr, pud);
}
@@ -2048,6 +2048,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze, struct page *page)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -2055,7 +2056,11 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
if (!pgd_present(*pgd))
return;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(*p4d))
+ return;
+
+ pud = pud_offset(p4d, address);
if (!pud_present(*pud))
return;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a7aa811b7d14..3d0aab9ee80d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4555,7 +4555,8 @@ out:
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
pgd_t *pgd = pgd_offset(mm, *addr);
- pud_t *pud = pud_offset(pgd, *addr);
+ p4d_t *p4d = p4d_offset(pgd, *addr);
+ pud_t *pud = pud_offset(p4d, *addr);
BUG_ON(page_count(virt_to_page(ptep)) == 0);
if (page_count(virt_to_page(ptep)) == 1)
@@ -4586,11 +4587,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
if (pud) {
if (sz == PUD_SIZE) {
pte = (pte_t *)pud;
@@ -4610,18 +4613,22 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
- pmd_t *pmd = NULL;
+ pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud)) {
- if (pud_huge(*pud))
- return (pte_t *)pud;
- pmd = pmd_offset(pud, addr);
- }
- }
+ if (!pgd_present(*pgd))
+ return NULL;
+ p4d = p4d_offset(pgd, addr);
+ if (!p4d_present(*p4d))
+ return NULL;
+ pud = pud_offset(p4d, addr);
+ if (!pud_present(*pud))
+ return NULL;
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
+ pmd = pmd_offset(pud, addr);
return (pte_t *) pmd;
}
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 31238dad85fb..b96a5f773d88 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -30,6 +30,9 @@
*/
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+#if CONFIG_PGTABLE_LEVELS > 4
+p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss;
+#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
@@ -82,10 +85,10 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
} while (pmd++, addr = next, addr != end);
}
-static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr,
unsigned long end)
{
- pud_t *pud = pud_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
unsigned long next;
do {
@@ -107,6 +110,23 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
} while (pud++, addr = next, addr != end);
}
+static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end)
+{
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (p4d_none(*p4d)) {
+ p4d_populate(&init_mm, p4d,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pud_populate(p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+}
+
/**
* kasan_populate_zero_shadow - populate shadow memory region with
* kasan_zero_page
@@ -125,6 +145,7 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
next = pgd_addr_end(addr, end);
if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -135,9 +156,22 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
* 3,2 - level page tables where we don't have
* puds,pmds, so pgd_populate(), pud_populate()
* are no-ops.
+ *
+ * The ifndef is required to avoid build breakage.
+ *
+ * With 5level-fixup.h, pgd_populate() is not a no-op and
+ * we reference kasan_zero_p4d, which is not defined
+ * unless 5-level paging is enabled.
+ *
+ * The ifndef can be dropped once all KASAN-enabled
+ * architectures switch to pgtable-nop4d.h.
*/
- pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
- pud = pud_offset(pgd, addr);
+#ifndef __ARCH_HAS_5LEVEL_HACK
+ pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
+#endif
+ p4d = p4d_offset(pgd, addr);
+ p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
+ pud = pud_offset(p4d, addr);
pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
@@ -148,6 +182,6 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
pgd_populate(&init_mm, pgd,
early_alloc(PAGE_SIZE, NUMA_NO_NODE));
}
- zero_pud_populate(pgd, addr, next);
+ zero_p4d_populate(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 6f1ed1630873..3a8ddf8baf7d 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -25,6 +25,7 @@
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
+#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -103,6 +104,7 @@ static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_SPINLOCK(quarantine_lock);
+DEFINE_STATIC_SRCU(remove_cache_srcu);
/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;
@@ -173,17 +175,22 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
struct qlist_head *q;
struct qlist_head temp = QLIST_INIT;
+ /*
+ * Note: irq must be disabled until after we move the batch to the
+ * global quarantine. Otherwise quarantine_remove_cache() can miss
+ * some objects belonging to the cache if they are in our local temp
+ * list. quarantine_remove_cache() executes on_each_cpu() at the
+ * beginning, which ensures that it either sees the objects in per-cpu
+ * lists or in the global quarantine.
+ */
local_irq_save(flags);
q = this_cpu_ptr(&cpu_quarantine);
qlist_put(q, &info->quarantine_link, cache->size);
- if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
+ if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp);
- local_irq_restore(flags);
-
- if (unlikely(!qlist_empty(&temp))) {
- spin_lock_irqsave(&quarantine_lock, flags);
+ spin_lock(&quarantine_lock);
WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
if (global_quarantine[quarantine_tail].bytes >=
@@ -196,20 +203,33 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (new_tail != quarantine_head)
quarantine_tail = new_tail;
}
- spin_unlock_irqrestore(&quarantine_lock, flags);
+ spin_unlock(&quarantine_lock);
}
+
+ local_irq_restore(flags);
}
void quarantine_reduce(void)
{
size_t total_size, new_quarantine_size, percpu_quarantines;
unsigned long flags;
+ int srcu_idx;
struct qlist_head to_free = QLIST_INIT;
if (likely(READ_ONCE(quarantine_size) <=
READ_ONCE(quarantine_max_size)))
return;
+ /*
+ * srcu critical section ensures that quarantine_remove_cache()
+ * will not miss objects belonging to the cache while they are in our
+ * local to_free list. srcu is chosen because (1) it gives us a
+ * private grace period domain that does not interfere with anything else,
+ * and (2) it allows synchronize_srcu() to return without waiting
+ * if there are no pending read critical sections (which is the
+ * expected case).
+ */
+ srcu_idx = srcu_read_lock(&remove_cache_srcu);
spin_lock_irqsave(&quarantine_lock, flags);
/*
@@ -237,6 +257,7 @@ void quarantine_reduce(void)
spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, NULL);
+ srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}
static void qlist_move_cache(struct qlist_head *from,
@@ -280,12 +301,28 @@ void quarantine_remove_cache(struct kmem_cache *cache)
unsigned long flags, i;
struct qlist_head to_free = QLIST_INIT;
+ /*
+ * Must be careful to not miss any objects that are being moved from
+ * per-cpu list to the global quarantine in quarantine_put(),
+ * nor objects being freed in quarantine_reduce(). on_each_cpu()
+ * achieves the first goal, while synchronize_srcu() achieves the
+ * second.
+ */
on_each_cpu(per_cpu_remove_cache, cache, 1);
spin_lock_irqsave(&quarantine_lock, flags);
- for (i = 0; i < QUARANTINE_BATCHES; i++)
+ for (i = 0; i < QUARANTINE_BATCHES; i++) {
+ if (qlist_empty(&global_quarantine[i]))
+ continue;
qlist_move_cache(&global_quarantine[i], &to_free, cache);
+ /* Scanning whole quarantine can take a while. */
+ spin_unlock_irqrestore(&quarantine_lock, flags);
+ cond_resched();
+ spin_lock_irqsave(&quarantine_lock, flags);
+ }
spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, cache);
+
+ synchronize_srcu(&remove_cache_srcu);
}
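
The srcu_read_lock()/synchronize_srcu() pairing added here is the standard SRCU pattern; a minimal self-contained sketch of that pattern (illustrative names, not the kasan code):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

static void reader(void)
{
	int idx = srcu_read_lock(&my_srcu);
	/* ... touch objects the updater must not free under us ... */
	srcu_read_unlock(&my_srcu, idx);
}

static void updater(void)
{
	/* ... unpublish the objects ... */
	synchronize_srcu(&my_srcu);	/* returns at once if no readers */
	/* ... no reader can still see the objects; safe to free ... */
}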
diff --git a/mm/madvise.c b/mm/madvise.c
index dc5927c812d3..7a2abf0127ae 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -513,7 +513,43 @@ static long madvise_dontneed(struct vm_area_struct *vma,
if (!can_madv_dontneed_vma(vma))
return -EINVAL;
- userfaultfd_remove(vma, prev, start, end);
+ if (!userfaultfd_remove(vma, start, end)) {
+ *prev = NULL; /* mmap_sem has been dropped, prev is stale */
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, start);
+ if (!vma)
+ return -ENOMEM;
+ if (start < vma->vm_start) {
+ /*
+ * This "vma" under revalidation is the one
+ * with the lowest vma->vm_start where start
+ * is also < vma->vm_end. If start <
+ * vma->vm_start, it means a hole materialized
+ * in the user address space within the
+ * virtual range passed to MADV_DONTNEED.
+ */
+ return -ENOMEM;
+ }
+ if (!can_madv_dontneed_vma(vma))
+ return -EINVAL;
+ if (end > vma->vm_end) {
+ /*
+ * Don't fail if end > vma->vm_end. If the old
+ * vma was split while the mmap_sem was
+ * released, the concurrent operation does not
+ * make the result of MADV_DONTNEED undefined.
+ * There may be an
+ * adjacent next vma that we'll walk
+ * next. userfaultfd_remove() will generate an
+ * UFFD_EVENT_REMOVE repetition on the
+ * end-vma->vm_end range, but the manager can
+ * handle a repetition fine.
+ */
+ end = vma->vm_end;
+ }
+ VM_WARN_ON(start >= end);
+ }
zap_page_range(vma, start, end - start);
return 0;
}
@@ -554,8 +590,10 @@ static long madvise_remove(struct vm_area_struct *vma,
* mmap_sem.
*/
get_file(f);
- userfaultfd_remove(vma, prev, start, end);
- up_read(&current->mm->mmap_sem);
+ if (userfaultfd_remove(vma, start, end)) {
+ /* mmap_sem was not released by userfaultfd_remove() */
+ up_read(&current->mm->mmap_sem);
+ }
error = vfs_fallocate(f,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, end - start);
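
Both call sites now follow one convention: userfaultfd_remove() returns true if it kept mmap_sem, false if it dropped it (and left it dropped), so the caller must retake the lock and revalidate. A sketch of the caller-side pattern, where callee_kept_mmap_sem() is a hypothetical placeholder for such a callee:

#include <linux/mm.h>
#include <linux/sched.h>

/* Placeholder: returns false if it dropped (and left dropped) mmap_sem. */
extern bool callee_kept_mmap_sem(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end);

static int do_range_op(unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (!vma)
		goto out_enomem;
	if (!callee_kept_mmap_sem(vma, start, end)) {
		/* Lock was dropped: every cached vma pointer is stale. */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || start < vma->vm_start)
			goto out_enomem;	/* a hole materialized */
		if (end > vma->vm_end)		/* old vma was split */
			end = vma->vm_end;
	}
	/* ... operate on [start, end) under mmap_sem ... */
	up_read(&current->mm->mmap_sem);
	return 0;
out_enomem:
	up_read(&current->mm->mmap_sem);
	return -ENOMEM;
}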
diff --git a/mm/memblock.c b/mm/memblock.c
index b64b47803e52..696f06d17c4e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1118,7 +1118,10 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
}
} while (left < right);
- return min(PHYS_PFN(type->regions[right].base), max_pfn);
+ if (right == type->cnt)
+ return max_pfn;
+ else
+ return min(PHYS_PFN(type->regions[right].base), max_pfn);
}
/**
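
The fix guards the search's exclusive upper bound: when the bisection converges at right == type->cnt, the pfn lies past the last region and the old code read type->regions[right], one entry past the end of the array. A stand-alone model of the corrected boundary handling (illustrative names):

/* regions_base[] holds the base pfn of each of cnt sorted regions. */
static unsigned long clamp_next_pfn(const unsigned long *regions_base,
				    int cnt, int right,
				    unsigned long max_pfn)
{
	if (right == cnt)	/* past the last region: nothing to read */
		return max_pfn;
	return regions_base[right] < max_pfn ? regions_base[right] : max_pfn;
}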
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c52ec893e241..2bd7541d7c11 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -466,6 +466,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
struct mem_cgroup_tree_per_node *mctz;
mctz = soft_limit_tree_from_page(page);
+ if (!mctz)
+ return;
/*
* Necessary to update all ancestors when hierarchy is used.
* because their event counter is not touched.
@@ -503,7 +505,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
for_each_node(nid) {
mz = mem_cgroup_nodeinfo(memcg, nid);
mctz = soft_limit_tree_node(nid);
- mem_cgroup_remove_exceeded(mz, mctz);
+ if (mctz)
+ mem_cgroup_remove_exceeded(mz, mctz);
}
}
@@ -2558,7 +2561,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
* is empty. Do it lockless to prevent lock bouncing. Races
* are acceptable as soft limit is best effort anyway.
*/
- if (RB_EMPTY_ROOT(&mctz->rb_root))
+ if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
return 0;
/*
@@ -4135,17 +4138,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
kfree(memcg->nodeinfo[node]);
}
-static void mem_cgroup_free(struct mem_cgroup *memcg)
+static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
- memcg_wb_domain_exit(memcg);
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
free_percpu(memcg->stat);
kfree(memcg);
}
+static void mem_cgroup_free(struct mem_cgroup *memcg)
+{
+ memcg_wb_domain_exit(memcg);
+ __mem_cgroup_free(memcg);
+}
+
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *memcg;
@@ -4196,7 +4204,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
fail:
if (memcg->id.id > 0)
idr_remove(&mem_cgroup_idr, memcg->id.id);
- mem_cgroup_free(memcg);
+ __mem_cgroup_free(memcg);
return NULL;
}
diff --git a/mm/memory.c b/mm/memory.c
index a97a4cec2e1f..235ba51b2fbf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -445,7 +445,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
mm_dec_nr_pmds(tlb->mm);
}
-static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -454,7 +454,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long start;
start = addr;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
@@ -462,6 +462,39 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
+ start &= P4D_MASK;
+ if (start < floor)
+ return;
+ if (ceiling) {
+ ceiling &= P4D_MASK;
+ if (!ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ return;
+
+ pud = pud_offset(p4d, start);
+ p4d_clear(p4d);
+ pud_free_tlb(tlb, pud, start);
+}
+
+static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ p4d_t *p4d;
+ unsigned long next;
+ unsigned long start;
+
+ start = addr;
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(p4d))
+ continue;
+ free_pud_range(tlb, p4d, addr, next, floor, ceiling);
+ } while (p4d++, addr = next, addr != end);
+
start &= PGDIR_MASK;
if (start < floor)
return;
@@ -473,9 +506,9 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
if (end - 1 > ceiling - 1)
return;
- pud = pud_offset(pgd, start);
+ p4d = p4d_offset(pgd, start);
pgd_clear(pgd);
- pud_free_tlb(tlb, pud, start);
+ p4d_free_tlb(tlb, p4d, start);
}
/*
@@ -539,7 +572,7 @@ void free_pgd_range(struct mmu_gather *tlb,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
}
@@ -658,7 +691,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
pte_t pte, struct page *page)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
- pud_t *pud = pud_offset(pgd, addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
pmd_t *pmd = pmd_offset(pud, addr);
struct address_space *mapping;
pgoff_t index;
@@ -1023,16 +1057,16 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
}
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+ p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pud_t *src_pud, *dst_pud;
unsigned long next;
- dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
+ dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
if (!dst_pud)
return -ENOMEM;
- src_pud = pud_offset(src_pgd, addr);
+ src_pud = pud_offset(src_p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
@@ -1056,6 +1090,28 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
return 0;
}
+static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ p4d_t *src_p4d, *dst_p4d;
+ unsigned long next;
+
+ dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
+ if (!dst_p4d)
+ return -ENOMEM;
+ src_p4d = p4d_offset(src_pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(src_p4d))
+ continue;
+ if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
+ vma, addr, next))
+ return -ENOMEM;
+ } while (dst_p4d++, src_p4d++, addr = next, addr != end);
+ return 0;
+}
+
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct vm_area_struct *vma)
{
@@ -1111,7 +1167,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(src_pgd))
continue;
- if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+ if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
vma, addr, next))) {
ret = -ENOMEM;
break;
@@ -1267,14 +1323,14 @@ next:
}
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
- struct vm_area_struct *vma, pgd_t *pgd,
+ struct vm_area_struct *vma, p4d_t *p4d,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pud_t *pud;
unsigned long next;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
@@ -1295,6 +1351,25 @@ next:
return addr;
}
+static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(p4d))
+ continue;
+ next = zap_pud_range(tlb, vma, p4d, addr, next, details);
+ } while (p4d++, addr = next, addr != end);
+
+ return addr;
+}
+
void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
@@ -1310,7 +1385,7 @@ void unmap_page_range(struct mmu_gather *tlb,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- next = zap_pud_range(tlb, vma, pgd, addr, next, details);
+ next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
}
@@ -1465,16 +1540,24 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes);
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
- pgd_t *pgd = pgd_offset(mm, addr);
- pud_t *pud = pud_alloc(mm, pgd, addr);
- if (pud) {
- pmd_t *pmd = pmd_alloc(mm, pud, addr);
- if (pmd) {
- VM_BUG_ON(pmd_trans_huge(*pmd));
- return pte_alloc_map_lock(mm, pmd, addr, ptl);
- }
- }
- return NULL;
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
+ pud = pud_alloc(mm, p4d, addr);
+ if (!pud)
+ return NULL;
+ pmd = pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ return NULL;
+
+ VM_BUG_ON(pmd_trans_huge(*pmd));
+ return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
/*
@@ -1740,7 +1823,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
return 0;
}
-static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
@@ -1748,7 +1831,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
pfn -= addr >> PAGE_SHIFT;
- pud = pud_alloc(mm, pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
@@ -1760,6 +1843,26 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
return 0;
}
+static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ unsigned long pfn, pgprot_t prot)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ pfn -= addr >> PAGE_SHIFT;
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+ if (remap_pud_range(mm, p4d, addr, next,
+ pfn + (addr >> PAGE_SHIFT), prot))
+ return -ENOMEM;
+ } while (p4d++, addr = next, addr != end);
+ return 0;
+}
+
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
@@ -1816,7 +1919,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
flush_cache_range(vma, addr, end);
do {
next = pgd_addr_end(addr, end);
- err = remap_pud_range(mm, pgd, addr, next,
+ err = remap_p4d_range(mm, pgd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
break;
@@ -1932,7 +2035,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
return err;
}
-static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
@@ -1940,7 +2043,7 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
- pud = pud_alloc(mm, pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
@@ -1952,6 +2055,26 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
return err;
}
+static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ pte_fn_t fn, void *data)
+{
+ p4d_t *p4d;
+ unsigned long next;
+ int err;
+
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+ err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
+ if (err)
+ break;
+ } while (p4d++, addr = next, addr != end);
+ return err;
+}
+
/*
* Scan a region of virtual memory, filling in page tables as necessary
* and calling a provided function on each leaf page table.
@@ -1970,7 +2093,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end);
- err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
+ err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
if (err)
break;
} while (pgd++, addr = next, addr != end);
@@ -3653,11 +3776,15 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
};
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
+ p4d_t *p4d;
int ret;
pgd = pgd_offset(mm, address);
+ p4d = p4d_alloc(mm, pgd, address);
+ if (!p4d)
+ return VM_FAULT_OOM;
- vmf.pud = pud_alloc(mm, pgd, address);
+ vmf.pud = pud_alloc(mm, p4d, address);
if (!vmf.pud)
return VM_FAULT_OOM;
if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
@@ -3779,12 +3906,35 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
}
EXPORT_SYMBOL_GPL(handle_mm_fault);
+#ifndef __PAGETABLE_P4D_FOLDED
+/*
+ * Allocate p4d page table.
+ * We've already handled the fast-path in-line.
+ */
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ p4d_t *new = p4d_alloc_one(mm, address);
+ if (!new)
+ return -ENOMEM;
+
+ smp_wmb(); /* See comment in __pte_alloc */
+
+ spin_lock(&mm->page_table_lock);
+ if (pgd_present(*pgd)) /* Another has populated it */
+ p4d_free(mm, new);
+ else
+ pgd_populate(mm, pgd, new);
+ spin_unlock(&mm->page_table_lock);
+ return 0;
+}
+#endif /* __PAGETABLE_P4D_FOLDED */
+
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
* We've already handled the fast-path in-line.
*/
-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
{
pud_t *new = pud_alloc_one(mm, address);
if (!new)
@@ -3793,10 +3943,17 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
- if (pgd_present(*pgd)) /* Another has populated it */
+#ifndef __ARCH_HAS_5LEVEL_HACK
+ if (p4d_present(*p4d)) /* Another has populated it */
pud_free(mm, new);
else
- pgd_populate(mm, pgd, new);
+ p4d_populate(mm, p4d, new);
+#else
+ if (pgd_present(*p4d)) /* Another has populated it */
+ pud_free(mm, new);
+ else
+ pgd_populate(mm, p4d, new);
+#endif /* __ARCH_HAS_5LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -3839,6 +3996,7 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep;
@@ -3847,7 +4005,11 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
goto out;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+ goto out;
+
+ pud = pud_offset(p4d, address);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
goto out;
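
None of the new p4d steps costs anything on today's 4-level (or smaller) architectures: with <asm-generic/pgtable-nop4d.h> the p4d level folds into the pgd. A simplified sketch of that folding, condensed from the asm-generic header:

/* A "p4d" is just the pgd entry under another name... */
typedef struct { pgd_t pgd; } p4d_t;

/* ...so the pgd level itself can never be none or bad... */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }

/* ...and p4d_offset() walks nowhere: it returns the same slot. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;
}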
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 295479b792ec..6fa7208bcd56 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -125,9 +125,12 @@ void put_online_mems(void)
}
+/* Serializes write accesses to mem_hotplug.active_writer. */
+static DEFINE_MUTEX(memory_add_remove_lock);
+
void mem_hotplug_begin(void)
{
- assert_held_device_hotplug();
+ mutex_lock(&memory_add_remove_lock);
mem_hotplug.active_writer = current;
@@ -147,6 +150,7 @@ void mem_hotplug_done(void)
mem_hotplug.active_writer = NULL;
mutex_unlock(&mem_hotplug.lock);
memhp_lock_release();
+ mutex_unlock(&memory_add_remove_lock);
}
/* add this memory to iomem resource */
diff --git a/mm/mlock.c b/mm/mlock.c
index 1050511f8b2b..0dd9ca18e19e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -380,6 +380,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
pte = get_locked_pte(vma->vm_mm, start, &ptl);
/* Make sure we do not cross the page table boundary */
end = pgd_addr_end(start, end);
+ end = p4d_addr_end(start, end);
end = pud_addr_end(start, end);
end = pmd_addr_end(start, end);
@@ -442,7 +443,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
while (start < end) {
struct page *page;
- unsigned int page_mask;
+ unsigned int page_mask = 0;
unsigned long page_increm;
struct pagevec pvec;
struct zone *zone;
@@ -456,8 +457,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
* suits munlock very well (and if somehow an abnormal page
* has sneaked into the range, we won't oops here: great).
*/
- page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
- &page_mask);
+ page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
if (page && !IS_ERR(page)) {
if (PageTransTail(page)) {
@@ -468,8 +468,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
/*
* Any THP page found by follow_page_mask() may
* have gotten split before reaching
- * munlock_vma_page(), so we need to recompute
- * the page_mask here.
+ * munlock_vma_page(), so we need to compute
+ * the page_mask here instead.
*/
page_mask = munlock_vma_page(page);
unlock_page(page);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 848e946b08e5..8edd0d576254 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -193,14 +193,14 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
}
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
- pgd_t *pgd, unsigned long addr, unsigned long end,
+ p4d_t *p4d, unsigned long addr, unsigned long end,
pgprot_t newprot, int dirty_accountable, int prot_numa)
{
pud_t *pud;
unsigned long next;
unsigned long pages = 0;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
@@ -212,6 +212,26 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
return pages;
}
+static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
+ pgd_t *pgd, unsigned long addr, unsigned long end,
+ pgprot_t newprot, int dirty_accountable, int prot_numa)
+{
+ p4d_t *p4d;
+ unsigned long next;
+ unsigned long pages = 0;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(p4d))
+ continue;
+ pages += change_pud_range(vma, p4d, addr, next, newprot,
+ dirty_accountable, prot_numa);
+ } while (p4d++, addr = next, addr != end);
+
+ return pages;
+}
+
static unsigned long change_protection_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end, pgprot_t newprot,
int dirty_accountable, int prot_numa)
@@ -230,7 +250,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- pages += change_pud_range(vma, pgd, addr, next, newprot,
+ pages += change_p4d_range(vma, pgd, addr, next, newprot,
dirty_accountable, prot_numa);
} while (pgd++, addr = next, addr != end);
diff --git a/mm/mremap.c b/mm/mremap.c
index 8233b0105c82..cd8a1b199ef9 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -32,6 +32,7 @@
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -39,7 +40,11 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
if (pgd_none_or_clear_bad(pgd))
return NULL;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none_or_clear_bad(p4d))
+ return NULL;
+
+ pud = pud_offset(p4d, addr);
if (pud_none_or_clear_bad(pud))
return NULL;
@@ -54,11 +59,15 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
+ pud = pud_alloc(mm, p4d, addr);
if (!pud)
return NULL;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaa64d2ffdc5..6cbde310abed 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -873,7 +873,8 @@ done_merging:
higher_page = page + (combined_pfn - pfn);
buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
higher_buddy = higher_page + (buddy_pfn - combined_pfn);
- if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+ if (pfn_valid_within(buddy_pfn) &&
+ page_is_buddy(higher_page, higher_buddy, order + 1)) {
list_add_tail(&page->lru,
&zone->free_area[order].free_list[migratetype]);
goto out;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index a23001a22c15..c4c9def8ffea 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -104,6 +104,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
struct mm_struct *mm = pvmw->vma->vm_mm;
struct page *page = pvmw->page;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
/* The only possible pmd mapping has been handled on last iteration */
@@ -133,7 +134,10 @@ restart:
pgd = pgd_offset(mm, pvmw->address);
if (!pgd_present(*pgd))
return false;
- pud = pud_offset(pgd, pvmw->address);
+ p4d = p4d_offset(pgd, pvmw->address);
+ if (!p4d_present(*p4d))
+ return false;
+ pud = pud_offset(p4d, pvmw->address);
if (!pud_present(*pud))
return false;
pvmw->pmd = pmd_offset(pud, pvmw->address);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 03761577ae86..60f7856e508f 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -69,14 +69,14 @@ again:
return err;
}
-static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pud_t *pud;
unsigned long next;
int err = 0;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
again:
next = pud_addr_end(addr, end);
@@ -113,6 +113,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
return err;
}
+static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ p4d_t *p4d;
+ unsigned long next;
+ int err = 0;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(p4d)) {
+ if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, walk);
+ if (err)
+ break;
+ continue;
+ }
+ if (walk->pmd_entry || walk->pte_entry)
+ err = walk_pud_range(p4d, addr, next, walk);
+ if (err)
+ break;
+ } while (p4d++, addr = next, addr != end);
+
+ return err;
+}
+
static int walk_pgd_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
@@ -131,7 +157,7 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
continue;
}
if (walk->pmd_entry || walk->pte_entry)
- err = walk_pud_range(pgd, addr, next, walk);
+ err = walk_p4d_range(pgd, addr, next, walk);
if (err)
break;
} while (pgd++, addr = next, addr != end);
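
walk_p4d_range() stays internal; callers keep programming against walk_page_range() and struct mm_walk. A minimal sketch of a walker on this API as it exists in this tree (illustrative: counts present ptes in a range):

#include <linux/mm.h>

static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return count;
}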
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 538998a137d2..9ac639499bd1 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -21,7 +21,6 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
/**
* pcpu_get_pages - get temp pages array
- * @chunk: chunk of interest
*
* Returns pointer to array of pointers to struct page which can be indexed
* with pcpu_page_idx(). Note that there is only one array and accesses
@@ -30,7 +29,7 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
* RETURNS:
* Pointer to temp pages array on success.
*/
-static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc)
+static struct page **pcpu_get_pages(void)
{
static struct page **pages;
size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
@@ -275,7 +274,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
{
struct page **pages;
- pages = pcpu_get_pages(chunk);
+ pages = pcpu_get_pages();
if (!pages)
return -ENOMEM;
@@ -313,7 +312,7 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
* successful population attempt so the temp pages array must
* be available now.
*/
- pages = pcpu_get_pages(chunk);
+ pages = pcpu_get_pages();
BUG_ON(!pages);
/* unmap and free */
diff --git a/mm/percpu.c b/mm/percpu.c
index 5696039b5c07..60a6488e9e6d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1011,8 +1011,11 @@ area_found:
mutex_unlock(&pcpu_alloc_mutex);
}
- if (chunk != pcpu_reserved_chunk)
+ if (chunk != pcpu_reserved_chunk) {
+ spin_lock_irqsave(&pcpu_lock, flags);
pcpu_nr_empty_pop_pages -= occ_pages;
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+ }
if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
pcpu_schedule_balance_work();
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 4ed5908c65b0..c99d9512a45b 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -22,6 +22,12 @@ void pgd_clear_bad(pgd_t *pgd)
pgd_clear(pgd);
}
+void p4d_clear_bad(p4d_t *p4d)
+{
+ p4d_ERROR(*p4d);
+ p4d_clear(p4d);
+}
+
void pud_clear_bad(pud_t *pud)
{
pud_ERROR(*pud);
diff --git a/mm/rmap.c b/mm/rmap.c
index 2da487d6cea8..49ed681ccc7b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -684,6 +684,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd = NULL;
pmd_t pmde;
@@ -692,7 +693,11 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
if (!pgd_present(*pgd))
goto out;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(*p4d))
+ goto out;
+
+ pud = pud_offset(p4d, address);
if (!pud_present(*pud))
goto out;
@@ -1316,12 +1321,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
while (page_vma_mapped_walk(&pvmw)) {
- subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
- address = pvmw.address;
-
- /* Unexpected PMD-mapped THP? */
- VM_BUG_ON_PAGE(!pvmw.pte, page);
-
/*
* If the page is mlock()d, we cannot swap it out.
* If it's recently referenced (perhaps page_referenced
@@ -1345,6 +1344,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
continue;
}
+ /* Unexpected PMD-mapped THP? */
+ VM_BUG_ON_PAGE(!pvmw.pte, page);
+
+ subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+ address = pvmw.address;
+
if (!(flags & TTU_IGNORE_ACCESS)) {
if (ptep_clear_flush_young_notify(vma, address,
pvmw.pte)) {
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 574c67b663fe..a56c3989f773 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -196,9 +196,9 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
return pmd;
}
-pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
+pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
- pud_t *pud = pud_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
@@ -208,6 +208,18 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
return pud;
}
+p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
+{
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d)) {
+ void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (!p)
+ return NULL;
+ p4d_populate(&init_mm, p4d, p);
+ }
+ return p4d;
+}
+
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
pgd_t *pgd = pgd_offset_k(addr);
@@ -225,6 +237,7 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
{
unsigned long addr = start;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -233,7 +246,10 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
pgd = vmemmap_pgd_populate(addr, node);
if (!pgd)
return -ENOMEM;
- pud = vmemmap_pud_populate(pgd, addr, node);
+ p4d = vmemmap_p4d_populate(pgd, addr, node);
+ if (!p4d)
+ return -ENOMEM;
+ pud = vmemmap_pud_populate(p4d, addr, node);
if (!pud)
return -ENOMEM;
pmd = vmemmap_pmd_populate(pud, addr, node);
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 9b5bc86f96ad..7ebb23836f68 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -267,7 +267,7 @@ int free_swap_slot(swp_entry_t entry)
{
struct swap_slots_cache *cache;
- BUG_ON(!swap_slot_cache_initialized);
+ WARN_ON_ONCE(!swap_slot_cache_initialized);
cache = &get_cpu_var(swp_slots);
if (use_swap_slot_cache && cache->slots_ret) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 521ef9b6064f..178130880b90 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1517,7 +1517,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
return 0;
}
-static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
@@ -1525,7 +1525,7 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long next;
int ret;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
@@ -1537,6 +1537,26 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
return 0;
}
+static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ swp_entry_t entry, struct page *page)
+{
+ p4d_t *p4d;
+ unsigned long next;
+ int ret;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(p4d))
+ continue;
+ ret = unuse_pud_range(vma, p4d, addr, next, entry, page);
+ if (ret)
+ return ret;
+ } while (p4d++, addr = next, addr != end);
+ return 0;
+}
+
static int unuse_vma(struct vm_area_struct *vma,
swp_entry_t entry, struct page *page)
{
@@ -1560,7 +1580,7 @@ static int unuse_vma(struct vm_area_struct *vma,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
+ ret = unuse_p4d_range(vma, pgd, addr, next, entry, page);
if (ret)
return ret;
} while (pgd++, addr = next, addr != end);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 479e631d43c2..8bcb501bce60 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -128,19 +128,22 @@ out_unlock:
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
- pmd_t *pmd = NULL;
pgd = pgd_offset(mm, address);
- pud = pud_alloc(mm, pgd, address);
- if (pud)
- /*
- * Note that we didn't run this because the pmd was
- * missing, the *pmd may be already established and in
- * turn it may also be a trans_huge_pmd.
- */
- pmd = pmd_alloc(mm, pud, address);
- return pmd;
+ p4d = p4d_alloc(mm, pgd, address);
+ if (!p4d)
+ return NULL;
+ pud = pud_alloc(mm, p4d, address);
+ if (!pud)
+ return NULL;
+ /*
+ * Note that pmd_alloc() isn't necessarily run because the
+ * pmd was missing: *pmd may already be established, and in
+ * turn it may also be a trans_huge_pmd.
+ */
+ return pmd_alloc(mm, pud, address);
}
#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b4024d688f38..0b057628a7ba 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -86,12 +86,12 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
} while (pmd++, addr = next, addr != end);
}
-static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
pud_t *pud;
unsigned long next;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_clear_huge(pud))
@@ -102,6 +102,22 @@ static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (pud++, addr = next, addr != end);
}
+static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_clear_huge(p4d))
+ continue;
+ if (p4d_none_or_clear_bad(p4d))
+ continue;
+ vunmap_pud_range(p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+}
+
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
pgd_t *pgd;
@@ -113,7 +129,7 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- vunmap_pud_range(pgd, addr, next);
+ vunmap_p4d_range(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -160,13 +176,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
return 0;
}
-static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
pud_t *pud;
unsigned long next;
- pud = pud_alloc(&init_mm, pgd, addr);
+ pud = pud_alloc(&init_mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
@@ -177,6 +193,23 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
return 0;
}
+static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ p4d = p4d_alloc(&init_mm, pgd, addr);
+ if (!p4d)
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+ if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
+ return -ENOMEM;
+ } while (p4d++, addr = next, addr != end);
+ return 0;
+}
+
/*
* Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
* will have pfns corresponding to the "pages" array.
@@ -196,7 +229,7 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
+ err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
if (err)
return err;
} while (pgd++, addr = next, addr != end);
@@ -237,6 +270,10 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
unsigned long addr = (unsigned long) vmalloc_addr;
struct page *page = NULL;
pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
/*
* XXX we might need to change this if we add VIRTUAL_BUG_ON for
@@ -244,21 +281,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
*/
VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
- if (!pgd_none(*pgd)) {
- pud_t *pud = pud_offset(pgd, addr);
- if (!pud_none(*pud)) {
- pmd_t *pmd = pmd_offset(pud, addr);
- if (!pmd_none(*pmd)) {
- pte_t *ptep, pte;
-
- ptep = pte_offset_map(pmd, addr);
- pte = *ptep;
- if (pte_present(pte))
- page = pte_page(pte);
- pte_unmap(ptep);
- }
- }
- }
+ if (pgd_none(*pgd))
+ return NULL;
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
+ return NULL;
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud))
+ return NULL;
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return NULL;
+
+ ptep = pte_offset_map(pmd, addr);
+ pte = *ptep;
+ if (pte_present(pte))
+ page = pte_page(pte);
+ pte_unmap(ptep);
return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
@@ -1644,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
if (fatal_signal_pending(current)) {
area->nr_pages = i;
- goto fail;
+ goto fail_no_warn;
}
if (node == NUMA_NO_NODE)
@@ -1670,6 +1709,7 @@ fail:
warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure, allocated %ld of %ld bytes",
(area->nr_pages*PAGE_SIZE), area->size);
+fail_no_warn:
vfree(area->addr);
return NULL;
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 69f9aff39a2e..b1947f0cbee2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1065,6 +1065,9 @@ const char * const vmstat_text[] = {
"thp_split_page_failed",
"thp_deferred_split_page",
"thp_split_pmd",
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ "thp_split_pud",
+#endif
"thp_zero_page_alloc",
"thp_zero_page_alloc_failed",
#endif
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 8970a2fd3b1a..f9492bccfd79 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -667,6 +667,7 @@ next:
z3fold_page_unlock(zhdr);
spin_lock(&pool->lock);
if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+ spin_unlock(&pool->lock);
atomic64_dec(&pool->pages_nr);
return 0;
}
diff --git a/net/atm/svc.c b/net/atm/svc.c
index db9794ec61d8..5589de7086af 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -318,7 +318,8 @@ out:
return error;
}
-static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
+static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
@@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk);
- error = svc_create(sock_net(sk), newsock, 0, 0);
+ error = svc_create(sock_net(sk), newsock, 0, kern);
if (error)
goto out;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a8e42cedf1db..b7c486752b3a 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1320,7 +1320,8 @@ out_release:
return err;
}
-static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f307b145ea54..507b80d59dec 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -301,7 +301,7 @@ done:
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index aa1a814ceddc..ac3c650cb234 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -471,7 +471,8 @@ done:
return err;
}
-static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index e4e9a2da1e7e..728e0c8dc8e7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -627,7 +627,7 @@ done:
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 236f34244dbe..013f2290bfa5 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook);
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ br_drop_fake_rtable(skb);
return netif_receive_skb(skb);
}
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 95087e6e8258..fa87fbd62bb7 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
}
-/* PF_BRIDGE/LOCAL_IN ************************************************/
-/* The packet is locally destined, which requires a real
- * dst_entry, so detach the fake one. On the way up, the
- * packet would pass through PRE_ROUTING again (which already
- * took place when the packet entered the bridge), but we
- * register an IPv4 PRE_ROUTING 'sabotage' hook that will
- * prevent this from happening. */
-static unsigned int br_nf_local_in(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- br_drop_fake_rtable(skb);
- return NF_ACCEPT;
-}
-
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
@@ -908,12 +893,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
.priority = NF_BR_PRI_BRNF,
},
{
- .hook = br_nf_local_in,
- .pf = NFPROTO_BRIDGE,
- .hooknum = NF_BR_LOCAL_IN,
- .priority = NF_BR_PRI_BRNF,
- },
- {
.hook = br_nf_forward_ip,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 464e88599b9d..108533859a53 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -230,6 +230,7 @@ enum {
Opt_osdkeepalivetimeout,
Opt_mount_timeout,
Opt_osd_idle_ttl,
+ Opt_osd_request_timeout,
Opt_last_int,
/* int args above */
Opt_fsid,
@@ -256,6 +257,7 @@ static match_table_t opt_tokens = {
{Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
{Opt_mount_timeout, "mount_timeout=%d"},
{Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
+ {Opt_osd_request_timeout, "osd_request_timeout=%d"},
/* int args above */
{Opt_fsid, "fsid=%s"},
{Opt_name, "name=%s"},
@@ -361,6 +363,7 @@ ceph_parse_options(char *options, const char *dev_name,
opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
+ opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT;
/* get mon ip(s) */
/* ip1[:port1][,ip2[:port2]...] */
@@ -473,6 +476,15 @@ ceph_parse_options(char *options, const char *dev_name,
}
opt->mount_timeout = msecs_to_jiffies(intval * 1000);
break;
+ case Opt_osd_request_timeout:
+ /* 0 is "wait forever" (i.e. infinite timeout) */
+ if (intval < 0 || intval > INT_MAX / 1000) {
+ pr_err("osd_request_timeout out of range\n");
+ err = -EINVAL;
+ goto out;
+ }
+ opt->osd_request_timeout = msecs_to_jiffies(intval * 1000);
+ break;
case Opt_share:
opt->flags &= ~CEPH_OPT_NOSHARE;
@@ -557,6 +569,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
seq_printf(m, "osdkeepalivetimeout=%d,",
jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000);
+ if (opt->osd_request_timeout != CEPH_OSD_REQUEST_TIMEOUT_DEFAULT)
+ seq_printf(m, "osd_request_timeout=%d,",
+ jiffies_to_msecs(opt->osd_request_timeout) / 1000);
/* drop redundant comma */
if (m->count != pos)
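
Parsed like the other integer options above, the new knob is given at mount time; as a usage illustration (hypothetical invocation, not taken from the diff), a CephFS mount that aborts OSD requests stuck for more than 60 seconds:

mount -t ceph 192.168.0.1:6789:/ /mnt/cephfs -o name=admin,osd_request_timeout=60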
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b65bbf9f45eb..e15ea9e4c495 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1709,6 +1709,8 @@ static void account_request(struct ceph_osd_request *req)
req->r_flags |= CEPH_OSD_FLAG_ONDISK;
atomic_inc(&req->r_osdc->num_requests);
+
+ req->r_start_stamp = jiffies;
}
static void submit_request(struct ceph_osd_request *req, bool wrlocked)
@@ -1789,6 +1791,14 @@ static void cancel_request(struct ceph_osd_request *req)
ceph_osdc_put_request(req);
}
+static void abort_request(struct ceph_osd_request *req, int err)
+{
+ dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
+
+ cancel_map_check(req);
+ complete_request(req, err);
+}
+
static void check_pool_dne(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
@@ -2487,6 +2497,7 @@ static void handle_timeout(struct work_struct *work)
container_of(work, struct ceph_osd_client, timeout_work.work);
struct ceph_options *opts = osdc->client->options;
unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
+ unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
LIST_HEAD(slow_osds);
struct rb_node *n, *p;
@@ -2502,15 +2513,23 @@ static void handle_timeout(struct work_struct *work)
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
bool found = false;
- for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
+ for (p = rb_first(&osd->o_requests); p; ) {
struct ceph_osd_request *req =
rb_entry(p, struct ceph_osd_request, r_node);
+ p = rb_next(p); /* abort_request() */
+
if (time_before(req->r_stamp, cutoff)) {
dout(" req %p tid %llu on osd%d is laggy\n",
req, req->r_tid, osd->o_osd);
found = true;
}
+ if (opts->osd_request_timeout &&
+ time_before(req->r_start_stamp, expiry_cutoff)) {
+ pr_err_ratelimited("tid %llu on osd%d timeout\n",
+ req->r_tid, osd->o_osd);
+ abort_request(req, -ETIMEDOUT);
+ }
}
for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
struct ceph_osd_linger_request *lreq =
@@ -2530,6 +2549,21 @@ static void handle_timeout(struct work_struct *work)
list_move_tail(&osd->o_keepalive_item, &slow_osds);
}
+ if (opts->osd_request_timeout) {
+ for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
+ struct ceph_osd_request *req =
+ rb_entry(p, struct ceph_osd_request, r_node);
+
+ p = rb_next(p); /* abort_request() */
+
+ if (time_before(req->r_start_stamp, expiry_cutoff)) {
+ pr_err_ratelimited("tid %llu on osd%d timeout\n",
+ req->r_tid, osdc->homeless_osd.o_osd);
+ abort_request(req, -ETIMEDOUT);
+ }
+ }
+ }
+
if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
maybe_request_map(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 6824c0ec8373..ffe9e904d4d1 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -390,9 +390,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
dout("crush decode tunable chooseleaf_stable = %d\n",
c->chooseleaf_stable);
- crush_finalize(c);
-
done:
+ crush_finalize(c);
dout("crush_decode success\n");
return c;
@@ -1380,7 +1379,6 @@ static int decode_new_up_state_weight(void **p, void *end,
if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
(xorstate & CEPH_OSD_EXISTS)) {
pr_info("osd%d does not exist\n", osd);
- map->osd_weight[osd] = CEPH_OSD_IN;
ret = set_primary_affinity(map, osd,
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8637b2b71f3d..7869ae3837ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
{
rtnl_lock();
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 3945821e9c1f..65ea0ff4017c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -953,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct kobject *kobj = &dev->_rx[i].kobj;
- if (!list_empty(&dev_net(dev)->exit_list))
+ if (!atomic_read(&dev_net(dev)->count))
kobj->uevent_suppress = 1;
if (dev->sysfs_rx_queue_group)
sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1371,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct netdev_queue *queue = dev->_tx + i;
- if (!list_empty(&dev_net(dev)->exit_list))
+ if (!atomic_read(&dev_net(dev)->count))
queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
{
struct device *dev = &(ndev->dev);
- if (!list_empty(&dev_net(ndev)->exit_list))
+ if (!atomic_read(&dev_net(ndev)->count))
dev_set_uevent_suppress(dev, 1);
kobject_get(&dev->kobj);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f3557958e9bf..cd4ba8c6b609 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3828,13 +3828,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
if (!skb_may_tx_timestamp(sk, false))
return;
- /* take a reference to prevent skb_orphan() from freeing the socket */
- sock_hold(sk);
-
- *skb_hwtstamps(skb) = *hwtstamps;
- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
-
- sock_put(sk);
+ /* Take a reference to prevent skb_orphan() from freeing the socket,
+ * but only if the socket refcount is not zero.
+ */
+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+ *skb_hwtstamps(skb) = *hwtstamps;
+ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+ sock_put(sk);
+ }
}
EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
@@ -3893,7 +3894,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
struct sock *sk = skb->sk;
struct sock_exterr_skb *serr;
- int err;
+ int err = 1;
skb->wifi_acked_valid = 1;
skb->wifi_acked = acked;
@@ -3903,14 +3904,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
- /* take a reference to prevent skb_orphan() from freeing the socket */
- sock_hold(sk);
-
- err = sock_queue_err_skb(sk, skb);
+ /* Take a reference to prevent skb_orphan() from freeing the socket,
+ * but only if the socket refcount is not zero.
+ */
+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+ err = sock_queue_err_skb(sk, skb);
+ sock_put(sk);
+ }
if (err)
kfree_skb(skb);
-
- sock_put(sk);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
diff --git a/net/core/sock.c b/net/core/sock.c
index f6fd79f33097..a96d5f7a5734 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -197,66 +197,55 @@ EXPORT_SYMBOL(sk_net_capable);
/*
* Each address family might have different locking rules, so we have
- * one slock key per address family:
+ * one slock key per address family and separate keys for internal and
+ * userspace sockets.
*/
static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
+static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
/*
* Make lock validator output more readable. (we pre-construct these
* strings build-time, so that runtime initialization of socket
* locks is fast):
*/
+
+#define _sock_locks(x) \
+ x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
+ x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
+ x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
+ x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
+ x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
+ x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
+ x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
+ x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
+ x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
+ x "27" , x "28" , x "AF_CAN" , \
+ x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
+ x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
+ x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
+ x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
+ x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
+
static const char *const af_family_key_strings[AF_MAX+1] = {
- "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
- "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
- "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
- "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
- "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
- "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
- "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
- "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
- "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
- "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
- "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
- "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
- "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
- "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
- "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
+ _sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
- "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
- "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
- "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
- "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
- "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
- "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
- "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
- "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
- "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
- "slock-27" , "slock-28" , "slock-AF_CAN" ,
- "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
- "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
- "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
- "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
- "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"
+ _sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
- "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
- "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
- "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
- "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
- "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
- "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
- "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
- "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
- "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
- "clock-27" , "clock-28" , "clock-AF_CAN" ,
- "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
- "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
- "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
- "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
- "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"
+ _sock_locks("clock-")
+};
+
+static const char *const af_family_kern_key_strings[AF_MAX+1] = {
+ _sock_locks("k-sk_lock-")
+};
+static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
+ _sock_locks("k-slock-")
+};
+static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
+ _sock_locks("k-clock-")
};
/*
@@ -264,6 +253,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
* so split the lock classes by using a per-AF key:
*/
static struct lock_class_key af_callback_keys[AF_MAX];
+static struct lock_class_key af_kern_callback_keys[AF_MAX];
/* Take into consideration the size of the struct sk_buff overhead in the
* determination of these values, since that is non-constant across
@@ -1293,7 +1283,16 @@ lenout:
*/
static inline void sock_lock_init(struct sock *sk)
{
- sock_lock_init_class_and_name(sk,
+ if (sk->sk_kern_sock)
+ sock_lock_init_class_and_name(
+ sk,
+ af_family_kern_slock_key_strings[sk->sk_family],
+ af_family_kern_slock_keys + sk->sk_family,
+ af_family_kern_key_strings[sk->sk_family],
+ af_family_kern_keys + sk->sk_family);
+ else
+ sock_lock_init_class_and_name(
+ sk,
af_family_slock_key_strings[sk->sk_family],
af_family_slock_keys + sk->sk_family,
af_family_key_strings[sk->sk_family],
@@ -1399,6 +1398,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
* why we need sk_prot_creator -acme
*/
sk->sk_prot = sk->sk_prot_creator = prot;
+ sk->sk_kern_sock = kern;
sock_lock_init(sk);
sk->sk_net_refcnt = kern ? 0 : 1;
if (likely(sk->sk_net_refcnt))
@@ -2277,7 +2277,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
}
EXPORT_SYMBOL(sock_no_socketpair);
-int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
+int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
return -EOPNOTSUPP;
}
@@ -2481,7 +2482,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
}
rwlock_init(&sk->sk_callback_lock);
- lockdep_set_class_and_name(&sk->sk_callback_lock,
+ if (sk->sk_kern_sock)
+ lockdep_set_class_and_name(
+ &sk->sk_callback_lock,
+ af_kern_callback_keys + sk->sk_family,
+ af_family_kern_clock_key_strings[sk->sk_family]);
+ else
+ lockdep_set_class_and_name(
+ &sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
af_family_clock_key_strings[sk->sk_family]);
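The _sock_locks() macro above exploits compile-time concatenation of adjacent string literals: a single address-family list generates all six key-string tables, and the added k- tables give kernel-internal sockets (those with sk_kern_sock set in sk_alloc()) lockdep classes separate from user sockets of the same family, so lockdep no longer conflates the two when validating lock ordering. A standalone sketch of the prefix-pasting idiom, with a truncated, illustrative family list:

    #include <stdio.h>

    #define NAMES(x) x "AF_UNIX", x "AF_INET", x "AF_INET6"

    static const char *const user_keys[] = { NAMES("sk_lock-") };
    static const char *const kern_keys[] = { NAMES("k-sk_lock-") };

    int main(void)
    {
            /* "k-sk_lock-" "AF_INET" concatenates to "k-sk_lock-AF_INET" */
            printf("%s / %s\n", user_keys[1], kern_keys[1]);
            return 0;
    }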
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198e730c..5e3a7302f774 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
for (i = 0; i < hc->tx_seqbufc; i++)
kfree(hc->tx_seqbuf[i]);
hc->tx_seqbufc = 0;
+ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 409d0cfd3447..b99168b0fabf 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
switch (type) {
case ICMP_REDIRECT:
- dccp_do_redirect(skb, sk);
+ if (!sock_owned_by_user(sk))
+ dccp_do_redirect(skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 233b57367758..d9b6a4e403e7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
if (type == NDISC_REDIRECT) {
- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ if (!sock_owned_by_user(sk)) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
- if (dst)
- dst->ops->redirect(dst, sk, skb);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
goto out;
}
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index e267e6f4c9a5..abd07a443219 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -142,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
struct dccp_request_sock *dreq = dccp_rsk(req);
bool own_req;
+ /* TCP/DCCP listeners became lockless.
+ * DCCP stores complex state in its request_sock, so we need
+ * a protection for them, now this code runs without being protected
+ * by the parent (listener) lock.
+ */
+ spin_lock_bh(&dreq->dreq_lock);
+
/* Check for retransmitted REQUEST */
if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
@@ -156,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
inet_rtx_syn_ack(sk, req);
}
/* Network Duplicate, discard packet */
- return NULL;
+ goto out;
}
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -182,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
req, &own_req);
- if (!child)
- goto listen_overflow;
-
- return inet_csk_complete_hashdance(sk, child, req, own_req);
+ if (child) {
+ child = inet_csk_complete_hashdance(sk, child, req, own_req);
+ goto out;
+ }
-listen_overflow:
- dccp_pr_debug("listen_overflow!\n");
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req);
- return NULL;
+out:
+ spin_unlock_bh(&dreq->dreq_lock);
+ return child;
}
EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -246,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req,
{
struct dccp_request_sock *dreq = dccp_rsk(req);
+ spin_lock_init(&dreq->dreq_lock);
inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
inet_rsk(req)->acked = 0;
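With listeners now lockless, dccp_check_req() serializes on the new per-request dreq_lock, and its control flow is reshaped so that every outcome (retransmitted REQUEST, bad packet, drop, successful child creation) leaves through the shared out: label, guaranteeing exactly one unlock. A compilable sketch of that lock-at-entry / single-exit shape, with a pthread mutex standing in for the kernel spinlock and hypothetical names throughout:

    #include <pthread.h>
    #include <stddef.h>

    struct req { pthread_mutex_t lock; int state; };

    static void *req_process(struct req *r, int retransmit)
    {
            void *child = NULL;

            pthread_mutex_lock(&r->lock);
            if (retransmit)         /* duplicate REQUEST: nothing to hand up */
                    goto out;
            if (r->state < 0)       /* bad packet: reset/drop path */
                    goto out;
            child = r;              /* success: hand the new child up */
    out:
            pthread_mutex_unlock(&r->lock);  /* every path unlocks exactly once */
            return child;
    }

    int main(void)
    {
            struct req r = { PTHREAD_MUTEX_INITIALIZER, 0 };

            return req_process(&r, 0) ? 0 : 1;   /* link with -lpthread */
    }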
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index e6e79eda9763..7de5b40a5d0d 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1070,7 +1070,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
return skb == NULL ? ERR_PTR(err) : skb;
}
-static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
+static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk, *newsk;
struct sk_buff *skb = NULL;
@@ -1099,7 +1100,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
cb = DN_SKB_CB(skb);
sk->sk_ack_backlog--;
- newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
+ newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
if (newsk == NULL) {
release_sock(sk);
kfree_skb(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 602d40f43687..6b1fc6e4278e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect);
* Accept a pending connection. The TCP layer now gives BSD semantics.
*/
-int inet_accept(struct socket *sock, struct socket *newsock, int flags)
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk1 = sock->sk;
int err = -EINVAL;
- struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
+ struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
if (!sk2)
goto do_err;
@@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
int proto = iph->protocol;
int err = -ENOSYS;
- if (skb->encapsulation)
+ if (skb->encapsulation) {
+ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
skb_set_inner_network_header(skb, nhoff);
+ }
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
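inet_accept() is the first of many accept() implementations in this series to grow a bool kern parameter: the flag records whether the new socket is accepted on behalf of the kernel (kernel_accept()) or userspace (sys_accept4()), and it is forwarded into socket creation so the child lands in the kernel-socket lockdep classes introduced in net/core/sock.c above. A toy sketch of how such a flag threads through an ops table (types and names hypothetical, not the kernel's):

    #include <stdbool.h>
    #include <stddef.h>

    struct socket { bool kern; };

    struct proto_ops {
            /* 'kern' tells the protocol whether the new sock is kernel-internal */
            int (*accept)(struct socket *sock, struct socket *newsock,
                          int flags, bool kern);
    };

    static int toy_accept(struct socket *sock, struct socket *newsock,
                          int flags, bool kern)
    {
            (void)sock; (void)flags;
            newsock->kern = kern;   /* new sock inherits the caller's context */
            return 0;
    }

    int main(void)
    {
            struct proto_ops ops = { toy_accept };
            struct socket listener = { false }, from_user, from_kernel;

            ops.accept(&listener, &from_user, 0, false);  /* sys_accept4() path */
            ops.accept(&listener, &from_kernel, 0, true); /* kernel_accept() path */
            return from_kernel.kern && !from_user.kern ? 0 : 1;
    }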
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b4d5980ade3b..5e313c1ac94f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
/*
* This will accept the next outstanding connection.
*/
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 737ce826d7ec..7a3fd25e8913 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk,
cork->length += length;
if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9a89b8deafae..575e19dcc017 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -279,10 +279,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
*/
void tcp_v4_mtu_reduced(struct sock *sk)
{
- struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
- u32 mtu = tcp_sk(sk)->mtu_info;
+ struct dst_entry *dst;
+ u32 mtu;
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+ mtu = tcp_sk(sk)->mtu_info;
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
@@ -428,7 +431,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
switch (type) {
case ICMP_REDIRECT:
- do_redirect(icmp_skb, sk);
+ if (!sock_owned_by_user(sk))
+ do_redirect(icmp_skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 40d893556e67..b2ab411c6d37 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
sk_mem_reclaim_partial(sk);
- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
goto out;
if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !icsk->icsk_pending)
goto out;
if (time_after(icsk->icsk_timeout, jiffies)) {
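The tcp_v4_mtu_reduced() and timer-handler hunks all add the same guard: (1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN). Each TCPF_* constant is 1 << TCP_*, so shifting the current state into a bit and ANDing against a mask tests membership in a set of states with one comparison instead of a chain of equality checks. A self-contained sketch of the idiom with made-up state numbers:

    #include <stdio.h>

    enum { ST_ESTABLISHED = 1, ST_CLOSE = 7, ST_LISTEN = 10 };

    #define F(st) (1u << (st))      /* state number -> state flag */

    static int in_set(int state, unsigned int set)
    {
            return !!(F(state) & set);
    }

    int main(void)
    {
            unsigned int skip = F(ST_CLOSE) | F(ST_LISTEN);

            printf("%d\n", in_set(ST_CLOSE, skip));        /* 1 */
            printf("%d\n", in_set(ST_ESTABLISHED, skip));  /* 0 */
            return 0;
    }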
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 04db40620ea6..a9a9553ee63d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -920,12 +920,12 @@ static int __init inet6_init(void)
err = register_pernet_subsys(&inet6_net_ops);
if (err)
goto register_pernet_fail;
- err = icmpv6_init();
- if (err)
- goto icmp_fail;
err = ip6_mr_init();
if (err)
goto ipmr_fail;
+ err = icmpv6_init();
+ if (err)
+ goto icmp_fail;
err = ndisc_init();
if (err)
goto ndisc_fail;
@@ -1061,10 +1061,10 @@ igmp_fail:
ndisc_cleanup();
ndisc_fail:
ip6_mr_cleanup();
-ipmr_fail:
- icmpv6_cleanup();
icmp_fail:
unregister_pernet_subsys(&inet6_net_ops);
+ipmr_fail:
+ icmpv6_cleanup();
register_pernet_fail:
sock_unregister(PF_INET6);
rtnl_unregister_all(PF_INET6);
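The inet6_init() reorder keeps the failure unwind a mirror image of the init sequence: icmpv6_init() now runs after ip6_mr_init(), so the icmp_fail label must sit above ipmr_fail and undo only what was set up before the failing step. A small runnable sketch of the mirrored goto ladder (stub subsystem names, not the real ones):

    #include <stdio.h>

    static int  ipmr_init(void)  { puts("ipmr up");     return 0; }
    static void ipmr_exit(void)  { puts("ipmr down"); }
    static int  icmp_init(void)  { puts("icmp up");     return 0; }
    static void icmp_exit(void)  { puts("icmp down"); }
    static int  ndisc_init(void) { puts("ndisc fails"); return -1; }

    static int subsys_init(void)
    {
            int err;

            err = ipmr_init();
            if (err)
                    goto out;
            err = icmp_init();
            if (err)
                    goto out_ipmr;
            err = ndisc_init();
            if (err)
                    goto out_icmp;
            return 0;

    out_icmp:                       /* labels run in reverse init order */
            icmp_exit();
    out_ipmr:
            ipmr_exit();
    out:
            return err;
    }

    int main(void) { return subsys_init() ? 1 : 0; }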
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e4266746e4a2..d4bf2c68a545 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -923,6 +923,8 @@ add:
ins = &rt->dst.rt6_next;
iter = *ins;
while (iter) {
+ if (iter->rt6i_metric > rt->rt6i_metric)
+ break;
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->dst.rt6_next;
fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 0838e6d01d2e..93e58a5e1837 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
int err = -ENOSYS;
- if (skb->encapsulation)
+ if (skb->encapsulation) {
+ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
skb_set_inner_network_header(skb, nhoff);
+ }
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 528b3c1f3fde..58f6288e9ba5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -768,13 +768,14 @@ slow_path:
* Fragment the datagram.
*/
- *prevhdr = NEXTHDR_FRAGMENT;
troom = rt->dst.dev->needed_tailroom;
/*
* Keep copying data until we run out.
*/
while (left > 0) {
+ u8 *fragnexthdr_offset;
+
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
@@ -819,6 +820,10 @@ slow_path:
*/
skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
+ fragnexthdr_offset = skb_network_header(frag);
+ fragnexthdr_offset += prevhdr - skb_network_header(skb);
+ *fragnexthdr_offset = NEXTHDR_FRAGMENT;
+
/*
* Build fragment header.
*/
@@ -1385,7 +1390,7 @@ emsgsize:
if ((((length + fragheaderlen) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 644ba59fbd9d..3d8a3b63b4fd 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
if (!skb->ignore_df && skb->len > mtu) {
skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
- if (skb->protocol == htons(ETH_P_IPV6))
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- else
+ } else {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
+ }
return -EMSGSIZE;
}
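The vti6_xmit() change clamps the MTU reported in an ICMPv6 Packet Too Big message to IPV6_MIN_MTU (1280): RFC 2460 guarantees every IPv6 link carries at least 1280 bytes, so advertising anything smaller would be invalid and can wedge the sender. A trivially runnable sketch of the clamp:

    #include <stdio.h>

    #define IPV6_MIN_MTU 1280u

    /* Never advertise a Packet-Too-Big MTU below the IPv6 floor. */
    static unsigned int ptb_mtu(unsigned int path_mtu)
    {
            return path_mtu < IPV6_MIN_MTU ? IPV6_MIN_MTU : path_mtu;
    }

    int main(void)
    {
            printf("%u\n", ptb_mtu(576));    /* clamped to 1280 */
            printf("%u\n", ptb_mtu(1500));   /* left at 1500 */
            return 0;
    }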
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 229bfcc451ef..35c58b669ebd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3299,7 +3299,6 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
+ NLA_ALIGN(sizeof(struct rtnexthop))
+ nla_total_size(16) /* RTA_GATEWAY */
- + nla_total_size(4) /* RTA_OIF */
+ lwtunnel_get_encap_size(rt->dst.lwtstate);
nexthop_len *= rt->rt6i_nsiblings;
@@ -3323,7 +3322,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
}
static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
- unsigned int *flags)
+ unsigned int *flags, bool skip_oif)
{
if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
*flags |= RTNH_F_LINKDOWN;
@@ -3336,7 +3335,8 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
goto nla_put_failure;
}
- if (rt->dst.dev &&
+ /* not needed for multipath encoding b/c it has a rtnexthop struct */
+ if (!skip_oif && rt->dst.dev &&
nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
goto nla_put_failure;
@@ -3350,6 +3350,7 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
{
struct rtnexthop *rtnh;
@@ -3362,7 +3363,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
rtnh->rtnh_hops = 0;
rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
- if (rt6_nexthop_info(skb, rt, &flags) < 0)
+ if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
goto nla_put_failure;
rtnh->rtnh_flags = flags;
@@ -3515,7 +3516,7 @@ static int rt6_fill_node(struct net *net,
nla_nest_end(skb, mp);
} else {
- if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0)
+ if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
goto nla_put_failure;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 60a5295a7de6..49fa2e8c3fa9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -391,10 +391,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
if (type == NDISC_REDIRECT) {
- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ if (!sock_owned_by_user(sk)) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
- if (dst)
- dst->ops->redirect(dst, sk, skb);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
goto out;
}
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 81adc29a448d..8d77ad5cadaf 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -828,7 +828,8 @@ out:
* Wait for incoming connection
*
*/
-static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
+static int irda_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct irda_sock *new, *self = irda_sk(sk);
@@ -836,7 +837,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb = NULL;
int err;
- err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
+ err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern);
if (err)
return err;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 89bbde1081ce..84de7b6326dc 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -938,7 +938,7 @@ done:
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 06186d608a27..cb4fff785cbf 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -641,11 +641,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
* @sock: Socket which connections arrive on.
* @newsock: Socket to move incoming connection to.
* @flags: User specified operational flags.
+ * @kern: If the socket is kernel internal
*
* Accept a new incoming connection.
* Returns 0 upon success, negative otherwise.
*/
-static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
+static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk, *newsk;
struct llc_sock *llc, *newllc;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 3818686182b2..33211f9a2656 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1288,7 +1288,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
/* fall through */
case NETDEV_CHANGE:
nh->nh_flags |= RTNH_F_LINKDOWN;
- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+ if (event != NETDEV_UNREGISTER)
+ ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
break;
}
if (event == NETDEV_UNREGISTER)
@@ -2028,6 +2029,7 @@ static void mpls_net_exit(struct net *net)
for (index = 0; index < platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
RCU_INIT_POINTER(platform_label[index], NULL);
+ mpls_notify_route(net, index, rt, NULL, NULL);
mpls_rt_free(rt);
}
rtnl_unlock();
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4bbf4526b885..ebf16f7f9089 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -765,7 +765,8 @@ out_release:
return err;
}
-static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
+static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 879885b31cce..2ffb18e73df6 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -441,7 +441,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
}
static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *new_sk;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 222bedcd9575..e81537991ddf 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -772,7 +772,8 @@ static void pep_sock_close(struct sock *sk, long timeout)
sock_put(sk);
}
-static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
+static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+ bool kern)
{
struct pep_sock *pn = pep_sk(sk), *newpn;
struct sock *newsk = NULL;
@@ -846,7 +847,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
}
/* Create a new to-be-accepted sock */
- newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
+ newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
+ kern);
if (!newsk) {
pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
err = -ENOBUFS;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index a6c8da3ee893..64634e3ec2fc 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -305,7 +305,7 @@ out:
}
static int pn_socket_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
struct sock *sk = sock->sk;
struct sock *newsk;
@@ -314,7 +314,7 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
if (unlikely(sk->sk_state != TCP_LISTEN))
return -EINVAL;
- newsk = sk->sk_prot->accept(sk, flags, &err);
+ newsk = sk->sk_prot->accept(sk, flags, &err, kern);
if (!newsk)
return err;
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 0e04dcceb1d4..1fa75ab7b733 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -429,6 +429,7 @@ void rds_conn_destroy(struct rds_connection *conn)
*/
rds_cong_remove_conn(conn);
+ put_net(conn->c_net);
kmem_cache_free(rds_conn_slab, conn);
spin_lock_irqsave(&rds_conn_lock, flags);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ce3775abc6e7..1c38d2c7caa8 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -442,7 +442,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_send_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
rdsdebug("ib_create_cq send failed: %d\n", ret);
- goto out;
+ goto rds_ibdev_out;
}
ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
@@ -456,19 +456,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_recv_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
rdsdebug("ib_create_cq recv failed: %d\n", ret);
- goto out;
+ goto send_cq_out;
}
ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
if (ret) {
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
if (ret) {
rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
/* XXX negotiate max send/recv with remote? */
@@ -494,7 +494,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
if (ret) {
rdsdebug("rdma_create_qp failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
@@ -504,7 +504,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
- goto out;
+ goto qp_out;
}
ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
@@ -514,7 +514,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
- goto out;
+ goto send_hdrs_dma_out;
}
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
@@ -522,7 +522,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
- goto out;
+ goto recv_hdrs_dma_out;
}
ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
@@ -530,7 +530,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
- goto out;
+ goto ack_dma_out;
}
ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
@@ -538,7 +538,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
- goto out;
+ goto sends_out;
}
rds_ib_recv_init_ack(ic);
@@ -546,8 +546,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
ic->i_send_cq, ic->i_recv_cq);
-out:
+ return ret;
+
+sends_out:
+ vfree(ic->i_sends);
+ack_dma_out:
+ ib_dma_free_coherent(dev, sizeof(struct rds_header),
+ ic->i_ack, ic->i_ack_dma);
+recv_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+send_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_send_hdrs, ic->i_send_hdrs_dma);
+qp_out:
+ rdma_destroy_qp(ic->i_cm_id);
+recv_cq_out:
+ if (!ib_destroy_cq(ic->i_recv_cq))
+ ic->i_recv_cq = NULL;
+send_cq_out:
+ if (!ib_destroy_cq(ic->i_send_cq))
+ ic->i_send_cq = NULL;
+rds_ibdev_out:
+ rds_ib_remove_conn(rds_ibdev, conn);
rds_ib_dev_put(rds_ibdev);
+
return ret;
}
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 39518ef7af4d..82d38ccf5e8b 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -147,7 +147,7 @@ struct rds_connection {
/* Protocol version */
unsigned int c_version;
- possible_net_t c_net;
+ struct net *c_net;
struct list_head c_map_item;
unsigned long c_map_queued;
@@ -162,13 +162,13 @@ struct rds_connection {
static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
- return read_pnet(&conn->c_net);
+ return conn->c_net;
}
static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
- write_pnet(&conn->c_net, net);
+ conn->c_net = get_net(net);
}
#define RDS_FLAG_CONG_BITMAP 0x01
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index a973d3b4dff0..225690076773 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
* we do need to clean up the listen socket here.
*/
if (rtn->rds_tcp_listen_sock) {
- rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
+
rtn->rds_tcp_listen_sock = NULL;
- flush_work(&rtn->rds_tcp_accept_w);
+ rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
}
}
@@ -523,13 +524,13 @@ static void rds_tcp_kill_sock(struct net *net)
struct rds_tcp_connection *tc, *_tc;
LIST_HEAD(tmp_list);
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
- rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
rtn->rds_tcp_listen_sock = NULL;
- flush_work(&rtn->rds_tcp_accept_w);
+ rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
- struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ struct net *c_net = tc->t_cpath->cp_conn->c_net;
if (net != c_net || !tc->t_sock)
continue;
@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net)
void *rds_tcp_listen_sock_def_readable(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
+
+ if (!lsock)
+ return NULL;
- return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+ return lsock->sk->sk_user_data;
}
static int rds_tcp_dev_event(struct notifier_block *this,
@@ -584,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
- struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ struct net *c_net = tc->t_cpath->cp_conn->c_net;
if (net != c_net || !tc->t_sock)
continue;
@@ -638,19 +643,19 @@ static int rds_tcp_init(void)
goto out;
}
- ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
- if (ret) {
- pr_warn("could not register rds_tcp_dev_notifier\n");
+ ret = rds_tcp_recv_init();
+ if (ret)
goto out_slab;
- }
ret = register_pernet_subsys(&rds_tcp_net_ops);
if (ret)
- goto out_notifier;
+ goto out_recv;
- ret = rds_tcp_recv_init();
- if (ret)
+ ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+ if (ret) {
+ pr_warn("could not register rds_tcp_dev_notifier\n");
goto out_pernet;
+ }
rds_trans_register(&rds_tcp_transport);
@@ -660,9 +665,8 @@ static int rds_tcp_init(void)
out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
-out_notifier:
- if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
- pr_warn("could not unregister rds_tcp_dev_notifier\n");
+out_recv:
+ rds_tcp_recv_exit();
out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
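The rds/tcp.c hunks reorder teardown into unpublish-then-destroy: rtn->rds_tcp_listen_sock is copied to a local and set to NULL before rds_tcp_listen_stop() runs, so a concurrent rds_tcp_listen_sock_def_readable() sees NULL and bails rather than dereferencing a socket mid-destruction, and the acceptor work is flushed before release. A userspace analogue of the shape using C11 atomics (illustrative names; the kernel code relies on its own locking, not these atomics):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct listener { int fd; };

    static _Atomic(struct listener *) g_listener;

    /* Reader: snapshot once; tolerate teardown racing with us. */
    static int listener_fd(void)
    {
            struct listener *l = atomic_load(&g_listener);

            return l ? l->fd : -1;  /* NULL means: being torn down, bail */
    }

    /* Teardown: unpublish first, then destroy what only we now hold. */
    static void listener_stop(void)
    {
            struct listener *l = atomic_exchange(&g_listener, NULL);

            if (!l)
                    return;
            /* the kernel code flushes the acceptor work here, pre-release */
            free(l);
    }

    int main(void)
    {
            struct listener *l = malloc(sizeof(*l));

            l->fd = 3;
            atomic_store(&g_listener, l);
            printf("%d\n", listener_fd());  /* 3 */
            listener_stop();
            printf("%d\n", listener_fd());  /* -1 */
            return 0;
    }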
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9a1cc8906576..56ea6620fcf9 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
struct socket *rds_tcp_listen_init(struct net *);
-void rds_tcp_listen_stop(struct socket *);
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
void rds_tcp_listen_data_ready(struct sock *sk);
int rds_tcp_accept_one(struct socket *sock);
int rds_tcp_keepalive(struct socket *sock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 67d0929c7d3d..507678853e6c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -133,7 +133,7 @@ int rds_tcp_accept_one(struct socket *sock)
new_sock->type = sock->type;
new_sock->ops = sock->ops;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
if (ret < 0)
goto out;
@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
* before it has been accepted and the accepter has set up their
* data_ready.. we only want to queue listen work for our listening
* socket
+ *
+ * (*ready)() may be null if we are racing with netns delete, and
+ * the listen socket is being torn down.
*/
if (sk->sk_state == TCP_LISTEN)
rds_tcp_accept_work(sk);
@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
out:
read_unlock_bh(&sk->sk_callback_lock);
- ready(sk);
+ if (ready)
+ ready(sk);
}
struct socket *rds_tcp_listen_init(struct net *net)
@@ -271,7 +275,7 @@ out:
return NULL;
}
-void rds_tcp_listen_stop(struct socket *sock)
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
{
struct sock *sk;
@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock)
/* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq);
+ flush_work(acceptor);
sock_release(sock);
}
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b8a1df2c9785..4a9729257023 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -871,7 +871,8 @@ out_release:
return err;
}
-static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9f4cfa25af7c..18b2ad8be8e2 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -420,6 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ enum rxrpc_call_state state;
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int ix;
rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
@@ -434,14 +435,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
_proto("Rx DATA %%%u { #%u f=%02x }",
sp->hdr.serial, seq, sp->hdr.flags);
- if (call->state >= RXRPC_CALL_COMPLETE)
+ state = READ_ONCE(call->state);
+ if (state >= RXRPC_CALL_COMPLETE)
return;
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
- if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
- call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
+ if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+ state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
!rxrpc_receiving_reply(call))
return;
@@ -650,6 +652,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_peer *peer;
unsigned int mtu;
+ bool wake = false;
u32 rwind = ntohl(ackinfo->rwind);
_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -657,9 +660,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
rwind, ntohl(ackinfo->jumbo_max));
- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
- call->tx_winsize = rwind;
+ if (call->tx_winsize != rwind) {
+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+ if (rwind > call->tx_winsize)
+ wake = true;
+ call->tx_winsize = rwind;
+ }
+
if (call->cong_ssthresh > rwind)
call->cong_ssthresh = rwind;
@@ -673,6 +681,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
spin_unlock_bh(&peer->lock);
_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
}
+
+ if (wake)
+ wake_up(&call->waitq);
}
/*
@@ -799,7 +810,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
return rxrpc_proto_abort("AK0", call, 0);
/* Ignore ACKs unless we are or have just been transmitting. */
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -940,7 +951,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
struct rxrpc_call *call)
{
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
break;
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 6491ca46a03f..3e2f1a8e9c5b 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -527,7 +527,7 @@ try_again:
msg->msg_namelen = len;
}
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_ACCEPTING:
ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
break;
@@ -640,7 +640,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
mutex_lock(&call->user_mutex);
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST:
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bc2d3dcff9de..97ab214ca411 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -488,6 +488,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
+ enum rxrpc_call_state state;
enum rxrpc_command cmd;
struct rxrpc_call *call;
unsigned long user_call_ID = 0;
@@ -526,13 +527,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
return PTR_ERR(call);
/* ... and we have the call lock. */
} else {
- ret = -EBUSY;
- if (call->state == RXRPC_CALL_UNINITIALISED ||
- call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||
- call->state == RXRPC_CALL_SERVER_PREALLOC ||
- call->state == RXRPC_CALL_SERVER_SECURING ||
- call->state == RXRPC_CALL_SERVER_ACCEPTING)
+ switch (READ_ONCE(call->state)) {
+ case RXRPC_CALL_UNINITIALISED:
+ case RXRPC_CALL_CLIENT_AWAIT_CONN:
+ case RXRPC_CALL_SERVER_PREALLOC:
+ case RXRPC_CALL_SERVER_SECURING:
+ case RXRPC_CALL_SERVER_ACCEPTING:
+ ret = -EBUSY;
goto error_release_sock;
+ default:
+ break;
+ }
ret = mutex_lock_interruptible(&call->user_mutex);
release_sock(&rx->sk);
@@ -542,10 +547,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
}
}
+ state = READ_ONCE(call->state);
_debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, call->state, call->conn);
+ call->debug_id, call->user_call_ID, state, call->conn);
- if (call->state >= RXRPC_CALL_COMPLETE) {
+ if (state >= RXRPC_CALL_COMPLETE) {
/* it's too late for this call */
ret = -ESHUTDOWN;
} else if (cmd == RXRPC_CMD_SEND_ABORT) {
@@ -555,12 +561,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
} else if (cmd != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else if (rxrpc_is_client_call(call) &&
- call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+ state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
/* request phase complete for this client call */
ret = -EPROTO;
} else if (rxrpc_is_service_call(call) &&
- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+ state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+ state != RXRPC_CALL_SERVER_SEND_REPLY) {
/* Reply phase not begun or not complete for service call. */
ret = -EPROTO;
} else {
@@ -605,14 +611,21 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
_debug("CALL %d USR %lx ST %d on CONN %p",
call->debug_id, call->user_call_ID, call->state, call->conn);
- if (call->state >= RXRPC_CALL_COMPLETE) {
- ret = -ESHUTDOWN; /* it's too late for this call */
- } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
- ret = -EPROTO; /* request phase complete for this client call */
- } else {
+ switch (READ_ONCE(call->state)) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_SERVER_ACK_REQUEST:
+ case RXRPC_CALL_SERVER_SEND_REPLY:
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
+ break;
+ case RXRPC_CALL_COMPLETE:
+ read_lock_bh(&call->state_lock);
+ ret = -call->error;
+ read_unlock_bh(&call->state_lock);
+ break;
+ default:
+ /* Request phase complete for this client call */
+ ret = -EPROTO;
+ break;
}
mutex_unlock(&call->user_mutex);
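All the rxrpc hunks share one fix: call->state changes concurrently, so each function snapshots it once with READ_ONCE() and makes every subsequent decision from that snapshot, rather than re-reading a value that could move between two tests. A compilable approximation using a C11 relaxed atomic load in place of READ_ONCE() (toy state names):

    #include <stdatomic.h>

    enum call_state { CALL_SEND, CALL_AWAIT_REPLY, CALL_COMPLETE };

    struct call { _Atomic enum call_state state; };

    /* Snapshot once; every branch below sees the same value. */
    static int call_can_send(struct call *c)
    {
            enum call_state s = atomic_load_explicit(&c->state,
                                                     memory_order_relaxed);

            if (s >= CALL_COMPLETE)
                    return 0;       /* too late for this call */
            return s == CALL_SEND || s == CALL_AWAIT_REPLY;
    }

    int main(void)
    {
            struct call c = { CALL_SEND };

            return call_can_send(&c) ? 0 : 1;
    }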
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ab8062909962..f9bb43c25697 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
if (ret < 0)
return ret;
+ if (!tb[TCA_CONNMARK_PARMS])
+ return -EINVAL;
+
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
if (!tcf_hash_check(tn, parm->index, a, bind)) {
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 3b7074e23024..c736627f8f4a 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
return skb->len;
nla_put_failure:
- rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 063baac5b9fe..961ee59f696a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -640,14 +640,15 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
/* Create and initialize a new sk for the socket to be returned by accept(). */
static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
- struct sctp_association *asoc)
+ struct sctp_association *asoc,
+ bool kern)
{
struct sock *newsk;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct sctp6_sock *newsctp6sk;
struct ipv6_txoptions *opt;
- newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
+ newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
if (!newsk)
goto out;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1b6d4574d2b0..989a900383b5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -575,10 +575,11 @@ static int sctp_v4_is_ce(const struct sk_buff *skb)
/* Create and initialize a new sk for the socket returned by accept(). */
static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
- struct sctp_association *asoc)
+ struct sctp_association *asoc,
+ bool kern)
{
struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
- sk->sk_prot, 0);
+ sk->sk_prot, kern);
struct inet_sock *newinet;
if (!newsk)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6f0a9be50f50..0f378ea2ae38 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4116,7 +4116,7 @@ static int sctp_disconnect(struct sock *sk, int flags)
* descriptor will be returned from accept() to represent the newly
* formed association.
*/
-static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
{
struct sctp_sock *sp;
struct sctp_endpoint *ep;
@@ -4151,7 +4151,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
*/
asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
- newsk = sp->pf->create_accept_sk(sk, asoc);
+ newsk = sp->pf->create_accept_sk(sk, asoc, kern);
if (!newsk) {
error = -ENOMEM;
goto out;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 85837ab90e89..093803786eac 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -944,7 +944,7 @@ out:
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
- int flags)
+ int flags, bool kern)
{
struct sock *sk = sock->sk, *nsk;
DECLARE_WAITQUEUE(wait, current);
diff --git a/net/socket.c b/net/socket.c
index 2c1e8677ff2d..e034fe4164be 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1506,7 +1506,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
if (err)
goto out_fd;
- err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+ err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
if (err < 0)
goto out_fd;
@@ -1731,6 +1731,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
/* We assume all kernel code knows the size of sockaddr_storage */
msg.msg_namelen = 0;
msg.msg_iocb = NULL;
+ msg.msg_flags = 0;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &msg, flags);
@@ -3238,7 +3239,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
if (err < 0)
goto done;
- err = sock->ops->accept(sock, *newsock, flags);
+ err = sock->ops->accept(sock, *newsock, flags, true);
if (err < 0) {
sock_release(*newsock);
*newsock = NULL;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 81cd31acf690..3b332b395045 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
struct ib_cq *sendcq, *recvcq;
int rc;
- max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+ max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+ RPCRDMA_MAX_SEND_SGES);
if (max_sge < RPCRDMA_MIN_SEND_SGES) {
pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
return -ENOMEM;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 43e4045e72bc..7130e73bd42c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -115,7 +115,8 @@ static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ bool kern);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
struct tipc_name_seq const *seq);
@@ -2029,7 +2030,8 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
*
* Returns 0 on success, errno otherwise
*/
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ bool kern)
{
struct sock *new_sk, *sk = sock->sk;
struct sk_buff *buf;
@@ -2051,7 +2053,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
buf = skb_peek(&sk->sk_receive_queue);
- res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
+ res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
if (res)
goto exit;
security_sk_clone(sock->sk, new_sock->sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ee37b390260a..928691c43408 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -636,7 +636,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int);
+static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
@@ -1402,7 +1402,8 @@ static void unix_sock_inherit_flags(const struct socket *old,
set_bit(SOCK_PASSSEC, &new->flags);
}
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct sock *tsk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9192ead66751..9f770f33c100 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1250,7 +1250,8 @@ out:
return err;
}
-static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *listener;
int err;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fd28a49dbe8f..8b911c29860e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -852,7 +852,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
return rc;
}
-static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct sock *newsk;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0806dccdf507..236cbbc0ab9c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1243,7 +1243,7 @@ static inline int policy_to_flow_dir(int dir)
}
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
- const struct flowi *fl)
+ const struct flowi *fl, u16 family)
{
struct xfrm_policy *pol;
@@ -1251,8 +1251,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
again:
pol = rcu_dereference(sk->sk_policy[dir]);
if (pol != NULL) {
- bool match = xfrm_selector_match(&pol->selector, fl,
- sk->sk_family);
+ bool match = xfrm_selector_match(&pol->selector, fl, family);
int err = 0;
if (match) {
@@ -2239,7 +2238,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
sk = sk_const_to_full_sk(sk);
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
num_pols = 1;
- pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
@@ -2518,7 +2517,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
pol = NULL;
sk = sk_to_full_sk(sk);
if (sk && sk->sk_policy[dir]) {
- pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+ pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
@@ -3069,6 +3068,11 @@ static int __net_init xfrm_net_init(struct net *net)
{
int rv;
+ /* Initialize the per-net locks here */
+ spin_lock_init(&net->xfrm.xfrm_state_lock);
+ spin_lock_init(&net->xfrm.xfrm_policy_lock);
+ mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
rv = xfrm_statistics_init(net);
if (rv < 0)
goto out_statistics;
@@ -3085,11 +3089,6 @@ static int __net_init xfrm_net_init(struct net *net)
if (rv < 0)
goto out;
- /* Initialize the per-net locks here */
- spin_lock_init(&net->xfrm.xfrm_state_lock);
- spin_lock_init(&net->xfrm.xfrm_policy_lock);
- mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
return 0;
out:
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index 9b0b5cbc5b89..0f98634c20a0 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -133,7 +133,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc
#if BUILDING_GCC_VERSION < 6000
register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL);
register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_sancov);
- register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_plugin_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_pass_info);
#endif
return 0;
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
index cf7e52e4781b..9b6e246a45d0 100644
--- a/scripts/module-common.lds
+++ b/scripts/module-common.lds
@@ -22,4 +22,6 @@ SECTIONS {
. = ALIGN(8);
.init_array 0 : { *(SORT(.init_array.*)) *(.init_array) }
+
+ __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) }
}
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 0458b037c8a1..0545f5a8cabe 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -372,6 +372,8 @@ disassocation||disassociation
disapear||disappear
disapeared||disappeared
disappared||disappeared
+disble||disable
+disbled||disabled
disconnet||disconnect
discontinous||discontinuous
dispertion||dispersion
@@ -732,6 +734,7 @@ oustanding||outstanding
overaall||overall
overhread||overhead
overlaping||overlapping
+overide||override
overrided||overridden
overriden||overridden
overun||overrun
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index ec1067a679da..08b1399d1da2 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -89,7 +89,7 @@ static void acp_reg_write(u32 val, void __iomem *acp_mmio, u32 reg)
writel(val, acp_mmio + (reg * 4));
}
-/* Configure a given dma channel parameters - enable/disble,
+/* Configure a given dma channel parameters - enable/disable,
* number of descriptors, priority
*/
static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num,
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 000000000000..067427259820
--- /dev/null
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+ struct pt_regs regs;
+ __u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index 11c8d9bc762e..5d19fdf80292 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -1387,7 +1387,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
/* Allow writing to any other BAR, or expansion ROM */
iowrite(portoff, val, mask, &d->config_words[reg]);
return true;
- /* We let them overide latency timer and cacheline size */
+ /* We let them override latency timer and cacheline size */
} else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
/* Only let them change the first two fields. */
if (mask == 0xFFFFFFFF)
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index e2efddf10231..1f5300e56b44 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -132,7 +132,7 @@ else
Q = @
endif
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
# level Makefile (perf), otherwise build Makefile will get
# the same command line setup.
MAKEOVERRIDES=
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 47076b15eebe..9b8555ea3459 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -135,7 +135,7 @@ else
Q = @
endif
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
# level Makefile (perf), otherwise build Makefile will get
# the same command line setup.
MAKEOVERRIDES=
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 66342804161c..0c03538df74c 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -140,7 +140,7 @@ struct pevent_plugin_option {
* struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
* {
* .name = "option-name",
- * .plugin_alias = "overide-file-name", (optional)
+ * .plugin_alias = "override-file-name", (optional)
* .description = "description of option to show users",
* },
* {
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 4cfdbb5b6967..066086dd59a8 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -805,11 +805,20 @@ static struct rela *find_switch_table(struct objtool_file *file,
insn->jump_dest->offset > orig_insn->offset))
break;
+ /* look for a relocation which references .rodata */
text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
insn->len);
- if (text_rela && text_rela->sym == file->rodata->sym)
- return find_rela_by_dest(file->rodata,
- text_rela->addend);
+ if (!text_rela || text_rela->sym != file->rodata->sym)
+ continue;
+
+ /*
+ * Make sure the .rodata address isn't associated with a
+ * symbol. gcc jump tables are anonymous data.
+ */
+ if (find_symbol_containing(file->rodata, text_rela->addend))
+ continue;
+
+ return find_rela_by_dest(file->rodata, text_rela->addend);
}
return NULL;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 0d7983ac63ef..d897702ce742 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
return NULL;
}
+struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
+{
+ struct symbol *sym;
+
+ list_for_each_entry(sym, &sec->symbol_list, list)
+ if (sym->type != STT_SECTION &&
+ offset >= sym->offset && offset < sym->offset + sym->len)
+ return sym;
+
+ return NULL;
+}
+
struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
unsigned int len)
{
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index aa1ff6596684..731973e1a3f5 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -79,6 +79,7 @@ struct elf {
struct elf *elf_open(const char *name);
struct section *find_section_by_name(struct elf *elf, const char *name);
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
unsigned int len);
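For context on the objtool fix above: gcc emits switch jump tables as anonymous .rodata, while ordinary lookup tables get a covering symbol, which is exactly what the new find_symbol_containing() check distinguishes. A hedged C illustration of typical (not guaranteed) compiler output:

    /* 'lut' is a named .rodata object, so find_symbol_containing()
     * reports it; the switch below may instead compile to an indirect
     * jump through an anonymous .rodata table, which is the case the
     * switch-table search must keep following. */
    static const int lut[4] = { 10, 20, 30, 40 };

    int classify(int x)
    {
            switch (x) {    /* may become an anonymous jump table */
            case 0: return lut[0];
            case 1: return lut[1];
            case 2: return lut[2];
            case 3: return lut[3];
            default: return -1;
            }
    }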
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 7913363bde5c..4f3c758d875d 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -31,7 +31,7 @@
#error Instruction buffer size too small
#endif
-/* Based on branch_type() from perf_event_intel_lbr.c */
+/* Based on branch_type() from arch/x86/events/intel/lbr.c */
static void intel_pt_insn_decoder(struct insn *insn,
struct intel_pt_insn *intel_pt_insn)
{
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 70e389bc4af7..9b4d8ba22fed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
/* Last entry */
if (curr->end == curr->start)
- curr->end = roundup(curr->start, 4096);
+ curr->end = roundup(curr->start, 4096) + 4096;
}
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
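The added 4096 matters when the last symbol's start is already page-aligned: roundup() then returns the start unchanged, producing a zero-sized symbol. A standalone check of the arithmetic (roundup defined here to match the kernel macro for these operands):

    #include <stdio.h>

    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
            unsigned long start = 0x2000;   /* already 4096-aligned */

            printf("%#lx\n", roundup(start, 4096));        /* 0x2000: end == start */
            printf("%#lx\n", roundup(start, 4096) + 4096); /* 0x3000: non-empty */
            return 0;
    }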
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 6e4eb2fc2d1e..0c8b61f8398e 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1880,6 +1880,7 @@ sub get_grub_index {
sub wait_for_input
{
my ($fp, $time) = @_;
+ my $start_time;
my $rin;
my $rout;
my $nr;
@@ -1895,17 +1896,22 @@ sub wait_for_input
vec($rin, fileno($fp), 1) = 1;
vec($rin, fileno(\*STDIN), 1) = 1;
+ $start_time = time;
+
while (1) {
$nr = select($rout=$rin, undef, undef, $time);
- if ($nr <= 0) {
- return undef;
- }
+ last if ($nr <= 0);
# copy data from stdin to the console
if (vec($rout, fileno(\*STDIN), 1) == 1) {
- sysread(\*STDIN, $buf, 1000);
- syswrite($fp, $buf, 1000);
+ $nr = sysread(\*STDIN, $buf, 1000);
+ syswrite($fp, $buf, $nr) if ($nr > 0);
+ }
+
+ # The timeout is based on the time spent waiting for the fp data
+ if (vec($rout, fileno($fp), 1) != 1) {
+ last if (defined($time) && (time - $start_time > $time));
next;
}
@@ -1917,12 +1923,11 @@ sub wait_for_input
last if ($ch eq "\n");
}
- if (!length($line)) {
- return undef;
- }
+ last if (!length($line));
return $line;
}
+ return undef;
}
sub reboot_to {
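The reworked wait_for_input() measures its timeout against the time spent waiting for console data, so stdin traffic no longer postpones the deadline, and only the byte count sysread() actually returned is forwarded. The same shape in C, as a hedged sketch (the function name and single-console simplification are assumptions):

    #include <sys/select.h>
    #include <time.h>
    #include <unistd.h>

    /* Sketch: wait up to 'timeout' seconds for data on console_fd while
     * mirroring stdin to the console. Returns 1 when console data is
     * ready, 0 on timeout or error. */
    static int wait_for_console(int console_fd, time_t timeout)
    {
            time_t start = time(NULL);
            char buf[1000];

            for (;;) {
                    struct timeval tv = { .tv_sec = timeout, .tv_usec = 0 };
                    fd_set rin;

                    FD_ZERO(&rin);
                    FD_SET(console_fd, &rin);
                    FD_SET(STDIN_FILENO, &rin);
                    if (select(console_fd + 1, &rin, NULL, NULL, &tv) <= 0)
                            return 0;
                    if (FD_ISSET(STDIN_FILENO, &rin)) {
                            ssize_t n = read(STDIN_FILENO, buf, sizeof(buf));

                            /* forward only the bytes actually read */
                            if (n > 0 && write(console_fd, buf, n) < 0)
                                    return 0;
                    }
                    if (FD_ISSET(console_fd, &rin))
                            return 1;
                    /* stdin alone fired: check the deadline, keep waiting */
                    if (time(NULL) - start > timeout)
                            return 0;
            }
    }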
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index f11315bedefc..6a9480c03cbd 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,6 +1,7 @@
CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
-LDFLAGS += -lpthread -lurcu
+LDFLAGS += -fsanitize=address
+LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder
CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
@@ -10,23 +11,25 @@ ifndef SHIFT
SHIFT=3
endif
+ifeq ($(BUILD), 32)
+ CFLAGS += -m32
+ LDFLAGS += -m32
+endif
+
targets: mapshift $(TARGETS)
main: $(OFILES)
- $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main
idr-test: idr-test.o $(CORE_OFILES)
- $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test
multiorder: multiorder.o $(CORE_OFILES)
- $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder
clean:
$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
vpath %.c ../../lib
-$(OFILES): *.h */*.h generated/map-shift.h \
+$(OFILES): Makefile *.h */*.h generated/map-shift.h \
../../include/linux/*.h \
../../include/asm/*.h \
../../../include/linux/radix-tree.h \
@@ -41,7 +44,7 @@ idr.c: ../../../lib/idr.c
.PHONY: mapshift
mapshift:
- @if ! grep -qw $(SHIFT) generated/map-shift.h; then \
+ @if ! grep -qws $(SHIFT) generated/map-shift.h; then \
echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
generated/map-shift.h; \
fi
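Splitting the libraries out of LDFLAGS here is not cosmetic: make's implicit link rule is roughly `$(CC) $(LDFLAGS) $^ $(LDLIBS) -o $@`, and -lpthread/-lurcu only resolve symbols when they follow the object files on the link line. With the libraries in LDLIBS the implicit rule links correctly, which is why the three explicit $(CC) command lines above can be dropped.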
diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c
index 9b09ddfe462f..99c40f3ed133 100644
--- a/tools/testing/radix-tree/benchmark.c
+++ b/tools/testing/radix-tree/benchmark.c
@@ -17,6 +17,9 @@
#include <time.h>
#include "test.h"
+#define for_each_index(i, base, order) \
+ for (i = base; i < base + (1 << order); i++)
+
#define NSEC_PER_SEC 1000000000L
static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
@@ -57,27 +60,176 @@ again:
return nsec;
}
+static void benchmark_insert(struct radix_tree_root *root,
+ unsigned long size, unsigned long step, int order)
+{
+ struct timespec start, finish;
+ unsigned long index;
+ long long nsec;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
+ for (index = 0 ; index < size ; index += step)
+ item_insert_order(root, index, order);
+
+ clock_gettime(CLOCK_MONOTONIC, &finish);
+
+ nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+ (finish.tv_nsec - start.tv_nsec);
+
+ printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n",
+ size, step, order, nsec);
+}
+
+static void benchmark_tagging(struct radix_tree_root *root,
+ unsigned long size, unsigned long step, int order)
+{
+ struct timespec start, finish;
+ unsigned long index;
+ long long nsec;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
+ for (index = 0 ; index < size ; index += step)
+ radix_tree_tag_set(root, index, 0);
+
+ clock_gettime(CLOCK_MONOTONIC, &finish);
+
+ nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+ (finish.tv_nsec - start.tv_nsec);
+
+ printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n",
+ size, step, order, nsec);
+}
+
+static void benchmark_delete(struct radix_tree_root *root,
+ unsigned long size, unsigned long step, int order)
+{
+ struct timespec start, finish;
+ unsigned long index, i;
+ long long nsec;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
+ for (index = 0 ; index < size ; index += step)
+ for_each_index(i, index, order)
+ item_delete(root, i);
+
+ clock_gettime(CLOCK_MONOTONIC, &finish);
+
+ nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+ (finish.tv_nsec - start.tv_nsec);
+
+ printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n",
+ size, step, order, nsec);
+}
+
static void benchmark_size(unsigned long size, unsigned long step, int order)
{
RADIX_TREE(tree, GFP_KERNEL);
long long normal, tagged;
- unsigned long index;
- for (index = 0 ; index < size ; index += step) {
- item_insert_order(&tree, index, order);
- radix_tree_tag_set(&tree, index, 0);
- }
+ benchmark_insert(&tree, size, step, order);
+ benchmark_tagging(&tree, size, step, order);
tagged = benchmark_iter(&tree, true);
normal = benchmark_iter(&tree, false);
- printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
- size, step, order, tagged, normal);
+ printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n",
+ size, step, order, tagged);
+ printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n",
+ size, step, order, normal);
+
+ benchmark_delete(&tree, size, step, order);
item_kill_tree(&tree);
rcu_barrier();
}
+static long long __benchmark_split(unsigned long index,
+ int old_order, int new_order)
+{
+ struct timespec start, finish;
+ long long nsec;
+ RADIX_TREE(tree, GFP_ATOMIC);
+
+ item_insert_order(&tree, index, old_order);
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ radix_tree_split(&tree, index, new_order);
+ clock_gettime(CLOCK_MONOTONIC, &finish);
+ nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+ (finish.tv_nsec - start.tv_nsec);
+
+ item_kill_tree(&tree);
+
+ return nsec;
+}
+
+static void benchmark_split(unsigned long size, unsigned long step)
+{
+ int i, j, idx;
+ long long nsec = 0;
+
+ for (idx = 0; idx < size; idx += step) {
+ for (i = 3; i < 11; i++) {
+ for (j = 0; j < i; j++) {
+ nsec += __benchmark_split(idx, i, j);
+ }
+ }
+ }
+
+ printv(2, "Size %8ld, step %8ld, split time %10lld ns\n",
+ size, step, nsec);
+}
+
+static long long __benchmark_join(unsigned long index,
+ unsigned order1, unsigned order2)
+{
+ unsigned long loc;
+ struct timespec start, finish;
+ long long nsec;
+ void *item, *item2 = item_create(index + 1, order1);
+ RADIX_TREE(tree, GFP_KERNEL);
+
+ item_insert_order(&tree, index, order2);
+ item = radix_tree_lookup(&tree, index);
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ radix_tree_join(&tree, index + 1, order1, item2);
+ clock_gettime(CLOCK_MONOTONIC, &finish);
+ nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+ (finish.tv_nsec - start.tv_nsec);
+
+ loc = find_item(&tree, item);
+ if (loc == -1)
+ free(item);
+
+ item_kill_tree(&tree);
+
+ return nsec;
+}
+
+static void benchmark_join(unsigned long step)
+{
+ int i, j, idx;
+ long long nsec = 0;
+
+ for (idx = 0; idx < 1 << 10; idx += step) {
+ for (i = 1; i < 15; i++) {
+ for (j = 0; j < i; j++) {
+ nsec += __benchmark_join(idx, i, j);
+ }
+ }
+ }
+
+ printv(2, "Size %8d, step %8ld, join time %10lld ns\n",
+ 1 << 10, step, nsec);
+}
+
void benchmark(void)
{
unsigned long size[] = {1 << 10, 1 << 20, 0};
@@ -95,4 +247,11 @@ void benchmark(void)
for (c = 0; size[c]; c++)
for (s = 0; step[s]; s++)
benchmark_size(size[c], step[s] << 9, 9);
+
+ for (c = 0; size[c]; c++)
+ for (s = 0; step[s]; s++)
+ benchmark_split(size[c], step[s]);
+
+ for (s = 0; step[s]; s++)
+ benchmark_join(step[s]);
}
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index a26098c6123d..30cd0b296f1a 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -153,6 +153,30 @@ void idr_nowait_test(void)
idr_destroy(&idr);
}
+void idr_get_next_test(void)
+{
+ unsigned long i;
+ int nextid;
+ DEFINE_IDR(idr);
+
+ int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
+
+ for (i = 0; indices[i]; i++) {
+ struct item *item = item_create(indices[i], 0);
+ assert(idr_alloc(&idr, item, indices[i], indices[i+1],
+ GFP_KERNEL) == indices[i]);
+ }
+
+ for (i = 0, nextid = 0; indices[i]; i++) {
+ idr_get_next(&idr, &nextid);
+ assert(nextid == indices[i]);
+ nextid++;
+ }
+
+ idr_for_each(&idr, item_idr_free, &idr);
+ idr_destroy(&idr);
+}
+
void idr_checks(void)
{
unsigned long i;
@@ -202,6 +226,7 @@ void idr_checks(void)
idr_alloc_test();
idr_null_test();
idr_nowait_test();
+ idr_get_next_test();
}
/*
@@ -338,7 +363,7 @@ void ida_check_random(void)
{
DEFINE_IDA(ida);
DECLARE_BITMAP(bitmap, 2048);
- int id;
+ int id, err;
unsigned int i;
time_t s = time(NULL);
@@ -352,8 +377,11 @@ void ida_check_random(void)
ida_remove(&ida, bit);
} else {
__set_bit(bit, bitmap);
- ida_pre_get(&ida, GFP_KERNEL);
- assert(!ida_get_new_above(&ida, bit, &id));
+ do {
+ ida_pre_get(&ida, GFP_KERNEL);
+ err = ida_get_new_above(&ida, bit, &id);
+ } while (err == -ENOMEM);
+ assert(!err);
assert(id == bit);
}
}
@@ -362,6 +390,24 @@ void ida_check_random(void)
goto repeat;
}
+void ida_simple_get_remove_test(void)
+{
+ DEFINE_IDA(ida);
+ unsigned long i;
+
+ for (i = 0; i < 10000; i++) {
+ assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
+ }
+ assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
+
+ for (i = 0; i < 10000; i++) {
+ ida_simple_remove(&ida, i);
+ }
+ assert(ida_is_empty(&ida));
+
+ ida_destroy(&ida);
+}
+
void ida_checks(void)
{
DEFINE_IDA(ida);
@@ -428,15 +474,41 @@ void ida_checks(void)
ida_check_max();
ida_check_conv();
ida_check_random();
+ ida_simple_get_remove_test();
radix_tree_cpu_dead(1);
}
+static void *ida_random_fn(void *arg)
+{
+ rcu_register_thread();
+ ida_check_random();
+ rcu_unregister_thread();
+ return NULL;
+}
+
+void ida_thread_tests(void)
+{
+ pthread_t threads[10];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
+ perror("creating ida thread");
+ exit(1);
+ }
+
+ while (i--)
+ pthread_join(threads[i], NULL);
+}
+
int __weak main(void)
{
radix_tree_init();
idr_checks();
ida_checks();
+ ida_thread_tests();
+ radix_tree_cpu_dead(1);
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);
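The new test pins down the idr_get_next() contract: it returns the pointer stored at the lowest allocated ID greater than or equal to *nextid and writes that ID back through the pointer. A hedged usage sketch (visit_all() and process() are hypothetical, not part of the patch):

    /* Sketch: visit every allocated ID in ascending order. */
    static void visit_all(struct idr *idr)
    {
            void *entry;
            int id = 0;

            while ((entry = idr_get_next(idr, &id)) != NULL) {
                    process(id, entry);     /* hypothetical consumer */
                    id++;                   /* resume past this ID */
            }
    }

The ida_check_random() change likewise follows the documented contract: ida_pre_get() preallocation can race with other consumers, so -ENOMEM from ida_get_new_above() means "preallocate again and retry", not a test failure.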
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index b829127d5670..bc9a78449572 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -368,6 +368,7 @@ int main(int argc, char **argv)
iteration_test(0, 10 + 90 * long_run);
iteration_test(7, 10 + 90 * long_run);
single_thread_tests(long_run);
+ ida_thread_tests();
/* Free any remaining preallocated nodes */
radix_tree_cpu_dead(0);
diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c
index d4ff00989245..36dcf7d6945d 100644
--- a/tools/testing/radix-tree/tag_check.c
+++ b/tools/testing/radix-tree/tag_check.c
@@ -330,6 +330,34 @@ static void single_check(void)
item_kill_tree(&tree);
}
+void radix_tree_clear_tags_test(void)
+{
+ unsigned long index;
+ struct radix_tree_node *node;
+ struct radix_tree_iter iter;
+ void **slot;
+
+ RADIX_TREE(tree, GFP_KERNEL);
+
+ item_insert(&tree, 0);
+ item_tag_set(&tree, 0, 0);
+ __radix_tree_lookup(&tree, 0, &node, &slot);
+ radix_tree_clear_tags(&tree, node, slot);
+ assert(item_tag_get(&tree, 0, 0) == 0);
+
+ for (index = 0; index < 1000; index++) {
+ item_insert(&tree, index);
+ item_tag_set(&tree, index, 0);
+ }
+
+ radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+ radix_tree_clear_tags(&tree, iter.node, slot);
+ assert(item_tag_get(&tree, iter.index, 0) == 0);
+ }
+
+ item_kill_tree(&tree);
+}
+
void tag_check(void)
{
single_check();
@@ -347,4 +375,5 @@ void tag_check(void)
thrash_tags();
rcu_barrier();
printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
+ radix_tree_clear_tags_test();
}
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index b30e11d9d271..0f8220cc6166 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -36,6 +36,7 @@ void iteration_test(unsigned order, unsigned duration);
void benchmark(void);
void idr_checks(void);
void ida_checks(void);
+void ida_thread_tests(void);
struct item *
item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 4b498265dae6..67531f47781b 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,12 +1,14 @@
LIBDIR := ../../../lib
BPFOBJ := $(LIBDIR)/bpf/bpf.o
-CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR)
+CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ)
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
TEST_PROGS := test_kmod.sh
+all: $(TEST_GEN_PROGS)
+
.PHONY: all clean force
# force a rebuild of BPFOBJ when its dependencies are updated
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e1f5b9eea1e8..d1555e4240c0 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,8 @@
* License as published by the Free Software Foundation.
*/
+#include <asm/types.h>
+#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
@@ -4583,10 +4585,12 @@ static bool is_admin(void)
cap_flag_value_t sysadmin = CAP_CLEAR;
const cap_value_t cap_val = CAP_SYS_ADMIN;
+#ifdef CAP_IS_SUPPORTED
if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
perror("cap_get_flag");
return false;
}
+#endif
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 248a820048df..66d31de60b9a 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
rc = run_test(test_function, name);
- if (rc == MAGIC_SKIP_RETURN_VALUE)
+ if (rc == MAGIC_SKIP_RETURN_VALUE) {
test_skip(name);
- else
+ /* so that a skipped test is not marked as failed */
+ rc = 0;
+ } else
test_finish(name, rc);
return rc;
diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
index d828bfb6ef2d..54064ced9e95 100644
--- a/tools/testing/selftests/powerpc/include/vsx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vsx_asm.h
@@ -16,56 +16,56 @@
*/
FUNC_START(load_vsx)
li r5,0
- lxvx vs20,r5,r3
+ lxvd2x vs20,r5,r3
addi r5,r5,16
- lxvx vs21,r5,r3
+ lxvd2x vs21,r5,r3
addi r5,r5,16
- lxvx vs22,r5,r3
+ lxvd2x vs22,r5,r3
addi r5,r5,16
- lxvx vs23,r5,r3
+ lxvd2x vs23,r5,r3
addi r5,r5,16
- lxvx vs24,r5,r3
+ lxvd2x vs24,r5,r3
addi r5,r5,16
- lxvx vs25,r5,r3
+ lxvd2x vs25,r5,r3
addi r5,r5,16
- lxvx vs26,r5,r3
+ lxvd2x vs26,r5,r3
addi r5,r5,16
- lxvx vs27,r5,r3
+ lxvd2x vs27,r5,r3
addi r5,r5,16
- lxvx vs28,r5,r3
+ lxvd2x vs28,r5,r3
addi r5,r5,16
- lxvx vs29,r5,r3
+ lxvd2x vs29,r5,r3
addi r5,r5,16
- lxvx vs30,r5,r3
+ lxvd2x vs30,r5,r3
addi r5,r5,16
- lxvx vs31,r5,r3
+ lxvd2x vs31,r5,r3
blr
FUNC_END(load_vsx)
FUNC_START(store_vsx)
li r5,0
- stxvx vs20,r5,r3
+ stxvd2x vs20,r5,r3
addi r5,r5,16
- stxvx vs21,r5,r3
+ stxvd2x vs21,r5,r3
addi r5,r5,16
- stxvx vs22,r5,r3
+ stxvd2x vs22,r5,r3
addi r5,r5,16
- stxvx vs23,r5,r3
+ stxvd2x vs23,r5,r3
addi r5,r5,16
- stxvx vs24,r5,r3
+ stxvd2x vs24,r5,r3
addi r5,r5,16
- stxvx vs25,r5,r3
+ stxvd2x vs25,r5,r3
addi r5,r5,16
- stxvx vs26,r5,r3
+ stxvd2x vs26,r5,r3
addi r5,r5,16
- stxvx vs27,r5,r3
+ stxvd2x vs27,r5,r3
addi r5,r5,16
- stxvx vs28,r5,r3
+ stxvd2x vs28,r5,r3
addi r5,r5,16
- stxvx vs29,r5,r3
+ stxvd2x vs29,r5,r3
addi r5,r5,16
- stxvx vs30,r5,r3
+ stxvd2x vs30,r5,r3
addi r5,r5,16
- stxvx vs31,r5,r3
+ stxvd2x vs31,r5,r3
blr
FUNC_END(store_vsx)
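lxvx/stxvx are ISA 3.0 (POWER9) instructions, while lxvd2x/stxvd2x are their ISA 2.06 counterparts, so this switch lets the VSX selftests assemble with older binutils and run on pre-POWER9 hardware. Because the doubleword-ordered load and store are used consistently on both the save and restore paths, the round-trip comparison the tests perform is unaffected.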
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 4cff7e7ddcc4..41642ba5e318 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,5 +1,9 @@
# Makefile for vm selftests
+ifndef OUTPUT
+ OUTPUT := $(shell pwd)
+endif
+
CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
LDLIBS = -lrt
TEST_GEN_FILES = compaction_test
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 5b2b4b3c634c..b4967d875236 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -245,7 +245,7 @@ void do_unexpected_base(void)
long ret;
asm volatile ("int $0x80"
: "=a" (ret) : "a" (243), "b" (low_desc)
- : "flags");
+ : "r8", "r9", "r10", "r11");
memcpy(&desc, low_desc, sizeof(desc));
munmap(low_desc, sizeof(desc));
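This fix, and the INT80_CLOBBERS changes in the selftests that follow, encode the same fact: the int $0x80 compat entry on a 64-bit kernel does not preserve r8-r11, so 64-bit test code issuing it must declare those registers as clobbered. A minimal sketch of the corrected pattern (20, the 32-bit __NR_getpid, is illustrative):

    /* Sketch: 32-bit ABI syscall from 64-bit code; the compat entry
     * may trash r8-r11, so they are listed as clobbers. */
    static long int80_getpid(void)
    {
            long ret;

            asm volatile ("int $0x80"
                          : "=a" (ret)
                          : "a" (20)        /* 32-bit __NR_getpid */
                          : "r8", "r9", "r10", "r11");
            return ret;
    }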
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 4af47079cf04..f6121612e769 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -45,6 +45,12 @@
#define AR_DB (1 << 22)
#define AR_G (1 << 23)
+#ifdef __x86_64__
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
+#else
+# define INT80_CLOBBERS
+#endif
+
static int nerrs;
/* Points to an array of 1024 ints, each holding its own index. */
@@ -588,7 +594,7 @@ static int invoke_set_thread_area(void)
asm volatile ("int $0x80"
: "=a" (ret), "+m" (low_user_desc) :
"a" (243), "b" (low_user_desc)
- : "flags");
+ : INT80_CLOBBERS);
return ret;
}
@@ -657,7 +663,7 @@ static void test_gdt_invalidation(void)
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
- : "flags");
+ : INT80_CLOBBERS);
if (sel != 0) {
result = "FAIL";
@@ -688,7 +694,7 @@ static void test_gdt_invalidation(void)
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
- : "flags");
+ : INT80_CLOBBERS);
if (sel != 0) {
result = "FAIL";
@@ -721,7 +727,7 @@ static void test_gdt_invalidation(void)
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
- : "flags");
+ : INT80_CLOBBERS);
#ifdef __x86_64__
syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
@@ -774,7 +780,7 @@ static void test_gdt_invalidation(void)
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
- : "flags");
+ : INT80_CLOBBERS);
#ifdef __x86_64__
syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index b037ce9cf116..eaea92439708 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args)
asm volatile ("int $0x80"
: "+a" (args->nr),
"+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
- "+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
+ "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
+ : : "r8", "r9", "r10", "r11");
args->arg5 = bp;
#else
sys32_helper(args, int80_and_ret);
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index 50c26358e8b7..a48da95c18fd 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps;
#ifdef __x86_64__
# define REG_IP REG_RIP
# define WIDTH "q"
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
#else
# define REG_IP REG_EIP
# define WIDTH "l"
+# define INT80_CLOBBERS
#endif
static unsigned long get_eflags(void)
@@ -140,7 +142,8 @@ int main()
printf("[RUN]\tSet TF and check int80\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
- asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid));
+ asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
+ : INT80_CLOBBERS);
check_result();
/*
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 571b64a01c50..8d1da1af4b09 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
return ret;
}
-static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
- struct vgic_its *its,
- gpa_t addr, unsigned int len)
-{
- u32 reg = 0;
-
- mutex_lock(&its->cmd_lock);
- if (its->creadr == its->cwriter)
- reg |= GITS_CTLR_QUIESCENT;
- if (its->enabled)
- reg |= GITS_CTLR_ENABLE;
- mutex_unlock(&its->cmd_lock);
-
- return reg;
-}
-
-static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
- gpa_t addr, unsigned int len,
- unsigned long val)
-{
- its->enabled = !!(val & GITS_CTLR_ENABLE);
-}
-
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
struct vgic_its *its,
gpa_t addr, unsigned int len)
@@ -1161,33 +1138,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
#define ITS_CMD_SIZE 32
#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
-/*
- * By writing to CWRITER the guest announces new commands to be processed.
- * To avoid any races in the first place, we take the its_cmd lock, which
- * protects our ring buffer variables, so that there is only one user
- * per ITS handling commands at a given time.
- */
-static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
- gpa_t addr, unsigned int len,
- unsigned long val)
+/* Must be called with the cmd_lock held. */
+static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
gpa_t cbaser;
u64 cmd_buf[4];
- u32 reg;
- if (!its)
- return;
-
- mutex_lock(&its->cmd_lock);
-
- reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
- reg = ITS_CMD_OFFSET(reg);
- if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
- mutex_unlock(&its->cmd_lock);
+ /* Commands are only processed when the ITS is enabled. */
+ if (!its->enabled)
return;
- }
- its->cwriter = reg;
cbaser = CBASER_ADDRESS(its->cbaser);
while (its->cwriter != its->creadr) {
@@ -1207,6 +1167,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
its->creadr = 0;
}
+}
+
+/*
+ * By writing to CWRITER the guest announces new commands to be processed.
+ * To avoid any races in the first place, we take the its_cmd lock, which
+ * protects our ring buffer variables, so that there is only one user
+ * per ITS handling commands at a given time.
+ */
+static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u64 reg;
+
+ if (!its)
+ return;
+
+ mutex_lock(&its->cmd_lock);
+
+ reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
+ reg = ITS_CMD_OFFSET(reg);
+ if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
+ mutex_unlock(&its->cmd_lock);
+ return;
+ }
+ its->cwriter = reg;
+
+ vgic_its_process_commands(kvm, its);
mutex_unlock(&its->cmd_lock);
}
@@ -1287,6 +1275,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
*regptr = reg;
}
+static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ u32 reg = 0;
+
+ mutex_lock(&its->cmd_lock);
+ if (its->creadr == its->cwriter)
+ reg |= GITS_CTLR_QUIESCENT;
+ if (its->enabled)
+ reg |= GITS_CTLR_ENABLE;
+ mutex_unlock(&its->cmd_lock);
+
+ return reg;
+}
+
+static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ mutex_lock(&its->cmd_lock);
+
+ its->enabled = !!(val & GITS_CTLR_ENABLE);
+
+ /*
+ * Try to process any pending commands. This function bails out early
+ * if the ITS is disabled or no commands have been queued.
+ */
+ vgic_its_process_commands(kvm, its);
+
+ mutex_unlock(&its->cmd_lock);
+}
+
#define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
{ \
.reg_offset = off, \
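The net effect of this rework: commands a guest queues while the ITS is disabled are no longer stranded. Previously they sat in the ring until the next CWRITER write; now both MMIO paths funnel into vgic_its_process_commands(), so enabling the ITS through GITS_CTLR drains the queue immediately, with cmd_lock held across both the enable and the processing so the two paths cannot interleave.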
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 3654b4c835ef..2a5db1352722 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -180,21 +180,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
bool new_active_state)
{
+ struct kvm_vcpu *requester_vcpu;
spin_lock(&irq->irq_lock);
+
+ /*
+ * The vcpu parameter here can mean multiple things depending on how
+ * this function is called; when handling a trap from the kernel it
+ * depends on the GIC version, and these functions are also called as
+ * part of save/restore from userspace.
+ *
+ * Therefore, we have to figure out the requester in a reliable way.
+ *
+ * When accessing VGIC state from user space, the requester_vcpu is
+ * NULL, which is fine, because we guarantee that no VCPUs are running
+ * when accessing VGIC state from user space so irq->vcpu->cpu is
+ * always -1.
+ */
+ requester_vcpu = kvm_arm_get_running_vcpu();
+
/*
* If this virtual IRQ was written into a list register, we
* have to make sure the CPU that runs the VCPU thread has
- * synced back LR state to the struct vgic_irq. We can only
- * know this for sure, when either this irq is not assigned to
- * anyone's AP list anymore, or the VCPU thread is not
- * running on any CPUs.
+ * synced back the LR state to the struct vgic_irq.
*
- * In the opposite case, we know the VCPU thread may be on its
- * way back from the guest and still has to sync back this
- * IRQ, so we release and re-acquire the spin_lock to let the
- * other thread sync back the IRQ.
+ * As long as the conditions below are true, we know the VCPU thread
+ * may be on its way back from the guest (we kicked the VCPU thread in
+ * vgic_change_active_prepare) and still has to sync back this IRQ,
+ * so we release and re-acquire the spin_lock to let the other thread
+ * sync back the IRQ.
*/
while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+ irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
irq->vcpu->cpu != -1) /* VCPU thread is running */
cond_resched_lock(&irq->irq_lock);
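The new requester_vcpu check closes a self-deadlock: when the trap was taken by the very VCPU thread whose interrupt state is being changed, irq->vcpu->cpu is the current CPU and will never become -1 from inside this loop, so waiting on it would spin forever. Excluding the requesting thread is safe because its own LR state has already been synced back by the time it handles the trap.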
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index edc6ee2dc852..be0f4c3e0142 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -229,10 +229,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
/*
* If we are emulating a GICv3, we do it in a non-GICv2-compatible
* way, so we force SRE to 1 to demonstrate this to the guest.
+ * Also, we don't support any form of IRQ/FIQ bypass.
* This goes with the spec allowing the value to be RAO/WI.
*/
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
- vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+ vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
+ ICC_SRE_EL1_DFB |
+ ICC_SRE_EL1_SRE);
vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
} else {
vgic_v3->vgic_sre = 0;