author     David S. Miller <davem@davemloft.net>  2017-08-01 10:07:50 -0700
committer  David S. Miller <davem@davemloft.net>  2017-08-01 10:07:50 -0700
commit     29fda25a2d31098044f8dfa177c4d2834071828e
tree       9e4be11c49a4405c19ece8f81fbb1db478da1055 /drivers
parent     bb1182bc3e5956a93ab3ef8a3cbfb7966c42a94a
parent     bc78d646e708dabd1744ca98744dea316f459497
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Two minor conflicts: one in the virtio_net driver (a bug fix overlapping the
addition of a helper) and one in MAINTAINERS (a new driver entry overlapping
the revamp of the PHY entry).

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ec.c  39
-rw-r--r--  drivers/acpi/internal.h  4
-rw-r--r--  drivers/acpi/numa.c  2
-rw-r--r--  drivers/acpi/sleep.c  6
-rw-r--r--  drivers/android/binder.c  17
-rw-r--r--  drivers/ata/Kconfig  4
-rw-r--r--  drivers/ata/libata-core.c  6
-rw-r--r--  drivers/ata/libata-eh.c  7
-rw-r--r--  drivers/ata/libata-scsi.c  6
-rw-r--r--  drivers/ata/sata_rcar.c  8
-rw-r--r--  drivers/base/dma-coherent.c  164
-rw-r--r--  drivers/base/dma-mapping.c  2
-rw-r--r--  drivers/block/nbd.c  20
-rw-r--r--  drivers/block/virtio_blk.c  7
-rw-r--r--  drivers/block/xen-blkfront.c  25
-rw-r--r--  drivers/clocksource/timer-of.c  12
-rw-r--r--  drivers/crypto/Kconfig  2
-rw-r--r--  drivers/crypto/bcm/spu2.c  1
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c  3
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c  5
-rw-r--r--  drivers/dax/super.c  6
-rw-r--r--  drivers/dma-buf/dma-fence.c  17
-rw-r--r--  drivers/dma-buf/sync_debug.c  2
-rw-r--r--  drivers/dma-buf/sync_file.c  8
-rw-r--r--  drivers/fsi/fsi-core.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h  1
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c  12
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c  5
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c  41
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig  1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c  1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c  10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c  24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c  10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c  10
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c  22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c  7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  24
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h  2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  86
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c  2
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c  2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c  31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c  35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c  1
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig  19
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c  66
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c  4
-rw-r--r--  drivers/gpu/host1x/dev.c  8
-rw-r--r--  drivers/hid/hid-core.c  1
-rw-r--r--  drivers/hid/hid-ids.h  1
-rw-r--r--  drivers/hid/hid-ortek.c  6
-rw-r--r--  drivers/hid/usbhid/hid-core.c  16
-rw-r--r--  drivers/hv/channel.c  2
-rw-r--r--  drivers/hwmon/applesmc.c  13
-rw-r--r--  drivers/ide/ide-timings.c  18
-rw-r--r--  drivers/infiniband/core/cma.c  2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c  13
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h  9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c  119
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h  3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c  1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c  29
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h  1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c  16
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h  3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c  9
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c  1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h  1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c  5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c  2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c  60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c  5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c  60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c  19
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h  2
-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c  4
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c  2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  4
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c  16
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c  4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c  3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  6
-rw-r--r--  drivers/irqchip/irq-digicolor.c  2
-rw-r--r--  drivers/irqchip/irq-gic-realview.c  2
-rw-r--r--  drivers/irqchip/irq-mips-cpu.c  2
-rw-r--r--  drivers/irqchip/irq-mips-gic.c  2
-rw-r--r--  drivers/lightnvm/pblk-rb.c  4
-rw-r--r--  drivers/lightnvm/pblk-read.c  23
-rw-r--r--  drivers/lightnvm/pblk.h  2
-rw-r--r--  drivers/md/dm-bufio.c  3
-rw-r--r--  drivers/md/dm-integrity.c  22
-rw-r--r--  drivers/md/dm-raid.c  29
-rw-r--r--  drivers/md/dm-table.c  35
-rw-r--r--  drivers/md/dm-verity-fec.c  21
-rw-r--r--  drivers/md/dm-zoned-metadata.c  12
-rw-r--r--  drivers/md/dm-zoned-reclaim.c  2
-rw-r--r--  drivers/md/dm-zoned-target.c  8
-rw-r--r--  drivers/md/md.c  2
-rw-r--r--  drivers/md/md.h  54
-rw-r--r--  drivers/md/raid1-10.c  81
-rw-r--r--  drivers/md/raid1.c  68
-rw-r--r--  drivers/md/raid10.c  25
-rw-r--r--  drivers/md/raid5.c  11
-rw-r--r--  drivers/mmc/host/dw_mmc.c  2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c  11
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c  8
-rw-r--r--  drivers/mux/Kconfig  19
-rw-r--r--  drivers/mux/mux-core.c  2
-rw-r--r--  drivers/net/bonding/bond_main.c  2
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c  9
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h  2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c  7
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c  27
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h  2
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c  7
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c  2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_clock.c  222
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c  5
-rw-r--r--  drivers/net/ethernet/sun/sunhme.h  6
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c  2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  43
-rw-r--r--  drivers/net/irda/mcs7780.c  16
-rw-r--r--  drivers/net/phy/Kconfig  13
-rw-r--r--  drivers/net/phy/phy.c  3
-rw-r--r--  drivers/net/ppp/pptp.c  2
-rw-r--r--  drivers/net/team/team.c  8
-rw-r--r--  drivers/net/tun.c  10
-rw-r--r--  drivers/net/virtio_net.c  7
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c  2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c  8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/tx.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h  4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c  8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c  6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c  15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c  3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c  3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c  3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/wifi.h  1
-rw-r--r--  drivers/nvme/host/core.c  8
-rw-r--r--  drivers/nvme/host/fc.c  121
-rw-r--r--  drivers/nvme/host/pci.c  49
-rw-r--r--  drivers/nvme/target/admin-cmd.c  16
-rw-r--r--  drivers/nvme/target/configfs.c  30
-rw-r--r--  drivers/nvme/target/core.c  5
-rw-r--r--  drivers/nvme/target/fc.c  109
-rw-r--r--  drivers/nvme/target/nvmet.h  2
-rw-r--r--  drivers/nvmem/rockchip-efuse.c  2
-rw-r--r--  drivers/of/irq.c  2
-rw-r--r--  drivers/parisc/pdc_stable.c  2
-rw-r--r--  drivers/perf/arm_pmu.c  41
-rw-r--r--  drivers/perf/arm_pmu_platform.c  9
-rw-r--r--  drivers/perf/qcom_l2_pmu.c  2
-rw-r--r--  drivers/phy/broadcom/Kconfig  2
-rw-r--r--  drivers/s390/cio/chp.c  1
-rw-r--r--  drivers/scsi/hpsa.c  2
-rw-r--r--  drivers/scsi/sg.c  7
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h  2
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c  17
-rw-r--r--  drivers/spmi/spmi.c  12
-rw-r--r--  drivers/staging/Kconfig  2
-rw-r--r--  drivers/staging/Makefile  1
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c  3
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c  19
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_cmd.c  2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c  1
-rw-r--r--  drivers/staging/sm750fb/ddk750_chip.c  2
-rw-r--r--  drivers/staging/sm750fb/sm750.c  24
-rw-r--r--  drivers/staging/speakup/main.c  2
-rw-r--r--  drivers/staging/speakup/spk_priv.h  2
-rw-r--r--  drivers/staging/speakup/spk_ttyio.c  22
-rw-r--r--  drivers/staging/vboxvideo/Kconfig  12
-rw-r--r--  drivers/staging/vboxvideo/Makefile  7
-rw-r--r--  drivers/staging/vboxvideo/TODO  9
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_base.c  246
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_ch_setup.h  66
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_channels.h  53
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_defs.h  92
-rw-r--r--  drivers/staging/vboxvideo/modesetting.c  142
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.c  286
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.h  296
-rw-r--r--  drivers/staging/vboxvideo/vbox_err.h  50
-rw-r--r--  drivers/staging/vboxvideo/vbox_fb.c  412
-rw-r--r--  drivers/staging/vboxvideo/vbox_hgsmi.c  115
-rw-r--r--  drivers/staging/vboxvideo/vbox_irq.c  197
-rw-r--r--  drivers/staging/vboxvideo/vbox_main.c  534
-rw-r--r--  drivers/staging/vboxvideo/vbox_mode.c  877
-rw-r--r--  drivers/staging/vboxvideo/vbox_prime.c  74
-rw-r--r--  drivers/staging/vboxvideo/vbox_ttm.c  472
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo.h  491
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo_guest.h  95
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo_vbe.h  84
-rw-r--r--  drivers/staging/vboxvideo/vbva_base.c  233
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c  10
-rw-r--r--  drivers/thunderbolt/switch.c  11
-rw-r--r--  drivers/thunderbolt/tb.h  4
-rw-r--r--  drivers/thunderbolt/tb_msgs.h  12
-rw-r--r--  drivers/tty/pty.c  85
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c  24
-rw-r--r--  drivers/tty/serial/imx.c  27
-rw-r--r--  drivers/tty/serial/sh-sci.c  12
-rw-r--r--  drivers/tty/serial/st-asc.c  1
-rw-r--r--  drivers/usb/class/cdc-acm.c  3
-rw-r--r--  drivers/usb/dwc2/gadget.c  3
-rw-r--r--  drivers/usb/dwc3/core.c  6
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c  18
-rw-r--r--  drivers/usb/dwc3/gadget.c  8
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c  2
-rw-r--r--  drivers/usb/gadget/function/f_uac1.c  20
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c  25
-rw-r--r--  drivers/usb/gadget/udc/Kconfig  5
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c  14
-rw-r--r--  drivers/usb/gadget/udc/snps_udc_plat.c  6
-rw-r--r--  drivers/usb/host/pci-quirks.c  54
-rw-r--r--  drivers/usb/host/pci-quirks.h  2
-rw-r--r--  drivers/usb/host/xhci-hub.c  14
-rw-r--r--  drivers/usb/host/xhci-pci.c  6
-rw-r--r--  drivers/usb/host/xhci-ring.c  11
-rw-r--r--  drivers/usb/host/xhci.c  10
-rw-r--r--  drivers/usb/host/xhci.h  1
-rw-r--r--  drivers/usb/renesas_usbhs/common.c  4
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c  31
-rw-r--r--  drivers/usb/storage/isd200.c  5
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h  1
-rw-r--r--  drivers/vhost/vhost.c  28
-rw-r--r--  drivers/vhost/vhost.h  3
-rw-r--r--  drivers/virtio/virtio_balloon.c  28
-rw-r--r--  drivers/w1/masters/omap_hdq.c  3
-rw-r--r--  drivers/w1/w1.c  4
-rw-r--r--  drivers/xen/balloon.c  3
-rw-r--r--  drivers/xen/events/events_base.c  13
-rw-r--r--  drivers/xen/grant-table.c  9
-rw-r--r--  drivers/xen/xen-balloon.c  22
-rw-r--r--  drivers/xen/xen-selfballoon.c  4
-rw-r--r--  drivers/xen/xenfs/super.c  1
293 files changed, 7098 insertions(+), 1339 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ddb01e9fa5b2..62068a5e814f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
+static bool ec_no_wakeup __read_mostly;
+module_param(ec_no_wakeup, bool, 0644);
+MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
+
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
@@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+
+void acpi_ec_flush_work(void)
+{
+ if (first_ec)
+ __acpi_ec_flush_event(first_ec);
+
+ flush_scheduled_work();
+}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
@@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev)
return 0;
}
+static int acpi_ec_suspend_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ /*
+ * The SCI handler doesn't run at this point, so the GPE can be
+ * masked at the low level without side effects.
+ */
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+
+ return 0;
+}
+
+static int acpi_ec_resume_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+
+ return 0;
+}
+
static int acpi_ec_resume(struct device *dev)
{
struct acpi_ec *ec =
@@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev)
#endif
static const struct dev_pm_ops acpi_ec_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9531d3276f65..58dd7ab3c653 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -193,6 +193,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+#ifdef CONFIG_PM_SLEEP
+void acpi_ec_flush_work(void);
+#endif
+
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
* So go over all cpu entries in SRAT to get apicid to node mapping.
*/
- /* SRAT: Static Resource Affinity Table */
+ /* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
struct acpi_subtable_proc srat_proc[3];
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index be17664736b2..fa8243c5c062 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -777,11 +777,11 @@ static void acpi_freeze_sync(void)
/*
* Process all pending events in case there are any wakeup ones.
*
- * The EC driver uses the system workqueue, so that one needs to be
- * flushed too.
+ * The EC driver uses the system workqueue and an additional special
+ * one, so those need to be flushed too.
*/
+ acpi_ec_flush_work();
acpi_os_wait_events_complete();
- flush_scheduled_work();
s2idle_wakeup = false;
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index aae4d8d4be36..f7665c31feca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait)
- wake_up_interruptible(target_wait);
+ if (target_wait) {
+ if (reply || !(t->flags & TF_ONE_WAY))
+ wake_up_interruptible_sync(target_wait);
+ else
+ wake_up_interruptible(target_wait);
+ }
return;
err_translate_failed:
@@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
- if (unlikely(current->mm != proc->vma_vm_mm)) {
- pr_err("current mm mismatch proc mm\n");
- return -EINVAL;
- }
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
- get_task_struct(current);
- proc->tsk = current;
- proc->vma_vm_mm = current->mm;
+ get_task_struct(current->group_leader);
+ proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 948fc86980a1..363fc5330c21 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -215,7 +215,7 @@ config SATA_FSL
config SATA_GEMINI
tristate "Gemini SATA bridge support"
- depends on PATA_FTIDE010
+ depends on ARCH_GEMINI || COMPILE_TEST
default ARCH_GEMINI
help
This enabled support for the FTIDE010 to SATA bridge
@@ -613,7 +613,7 @@ config PATA_FTIDE010
tristate "Faraday Technology FTIDE010 PATA support"
depends on OF
depends on ARM
- default ARCH_GEMINI
+ depends on SATA_GEMINI
help
This option enables support for the Faraday FTIDE010
PATA controller found in the Cortina Gemini SoCs.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8453f9a4682f..fa7dd4394c02 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
- !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+ !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
dma = true;
@@ -2102,8 +2102,8 @@ retry:
buf, sectors * ATA_SECT_SIZE, 0);
if (err_mask && dma) {
- dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
- ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+ dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+ ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
goto retry;
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b70bcf6d2914..3dbd05532c09 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1434,7 +1434,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
/**
* ata_eh_done - EH action complete
-* @ap: target ATA port
+ * @link: ATA link for which EH actions are complete
* @dev: target ATA dev for per-dev action (can be NULL)
* @action: action just completed
*
@@ -1576,7 +1576,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
+ * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
* @cmd: scsi command for which the sense code should be set
*
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
@@ -4175,7 +4175,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
- int rc = 0;
/* are we resuming? */
spin_lock_irqsave(ap->lock, flags);
@@ -4202,7 +4201,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
ata_acpi_set_state(ap, ap->pm_mesg);
if (ap->ops->port_resume)
- rc = ap->ops->port_resume(ap);
+ ap->ops->port_resume(ap);
/* tell ACPI that we're resuming */
ata_acpi_on_resume(ap);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d462c5a3a7ef..44ba292f2cd7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
{
if (!sata_pmp_attached(ap)) {
- if (likely(devno < ata_link_max_devices(&ap->link)))
+ if (likely(devno >= 0 &&
+ devno < ata_link_max_devices(&ap->link)))
return &ap->link.device[devno];
} else {
- if (likely(devno < ap->nr_pmp_links))
+ if (likely(devno >= 0 &&
+ devno < ap->nr_pmp_links))
return &ap->pmp_link[devno].device[0];
}
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index ee9844758736..537d11869069 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = {
.compatible = "renesas,sata-r8a7795",
.data = (void *)RCAR_GEN2_SATA
},
+ {
+ .compatible = "renesas,rcar-gen2-sata",
+ .data = (void *)RCAR_GEN2_SATA
+ },
+ {
+ .compatible = "renesas,rcar-gen3-sata",
+ .data = (void *)RCAR_GEN2_SATA
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sata_rcar_match);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
{
if (dev && dev->dma_mem)
return dev->dma_mem;
- return dma_coherent_default_memory;
+ return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- * to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
int order = get_order(size);
unsigned long flags;
int pageno;
int dma_memory_map;
+ void *ret;
- if (!mem)
- return 0;
-
- *ret = NULL;
spin_lock_irqsave(&mem->spinlock, flags);
if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
goto err;
/*
- * Memory was found in the per-device area.
+ * Memory was found in the coherent area.
*/
- *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
- *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ ret = mem->virt_base + (pageno << PAGE_SHIFT);
dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
if (dma_memory_map)
- memset(*ret, 0, size);
+ memset(ret, 0, size);
else
- memset_io(*ret, 0, size);
+ memset_io(ret, 0, size);
- return 1;
+ return ret;
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
+ return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ * to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ if (!mem)
+ return 0;
+
+ *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ if (*ret)
+ return 1;
+
/*
* In the case where the allocation can not be satisfied from the
* per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
*/
return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ if (!dma_coherent_default_memory)
+ return NULL;
+
+ return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+ dma_handle);
+}
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+ int order, void *vaddr)
+{
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
}
return 0;
}
-EXPORT_SYMBOL(dma_release_from_coherent);
/**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
* @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_coherent
- * @size: size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret: result from remap_pfn_range()
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
*
* This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
*
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
*/
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_release_from_coherent(dma_coherent_default_memory, order,
+ vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+ struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
}
return 0;
}
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
+ * @size: size of the memory buffer allocated
+ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+ size_t size, int *ret)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+ vaddr, size, ret);
+}
/*
* Support for reserved memory regions defined in device tree
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index dea7d85134ee..5bdf923294a5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct nbd_cmd *cmd;
- int ret = 0;
while (1) {
cmd = nbd_read_stat(nbd, args->index);
@@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work)
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
- ret = PTR_ERR(cmd);
break;
}
@@ -910,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
continue;
}
sk_set_memalloc(sock->sk);
- sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
old = nsock->sock;
@@ -924,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
+ clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+
/* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex.
*/
@@ -980,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd)
int i, ret;
for (i = 0; i < config->num_connections; i++) {
+ struct nbd_sock *nsock = config->socks[i];
+
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
+ mutex_unlock(&nsock->tx_lock);
}
}
@@ -993,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
- &config->runtime_flags))
- send_disconnects(nbd);
+ set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+ send_disconnects(nbd);
return 0;
}
@@ -1076,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd)
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
- config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ config->socks[i]->sock->sk->sk_sndtimeo =
+ nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4e02aa5fdac0..1498b899a593 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -541,12 +541,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
- for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
- if (sysfs_streq(buf, virtblk_cache_types[i]))
- break;
-
+ i = sysfs_match_string(virtblk_cache_types, buf);
if (i < 0)
- return -EINVAL;
+ return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c852ed3c01d5..98e34e4c62b8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -111,7 +111,7 @@ struct blk_shadow {
};
struct blkif_req {
- int error;
+ blk_status_t error;
};
static inline struct blkif_req *blkif_req(struct request *rq)
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* existing persistent grants, or if we have to get new grants,
* as there are not sufficiently many free.
*/
+ bool new_persistent_gnts = false;
struct scatterlist *sg;
int num_sg, max_grefs, num_grant;
@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
*/
max_grefs += INDIRECT_GREFS(max_grefs);
- /*
- * We have to reserve 'max_grefs' grants because persistent
- * grants are shared by all rings.
- */
- if (max_grefs > 0)
- if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
+ /* Check if we have enough persistent grants to allocate a request */
+ if (rinfo->persistent_gnts_c < max_grefs) {
+ new_persistent_gnts = true;
+
+ if (gnttab_alloc_grant_references(
+ max_grefs - rinfo->persistent_gnts_c,
+ &setup.gref_head) < 0) {
gnttab_request_free_callback(
&rinfo->callback,
blkif_restart_queue_callback,
rinfo,
- max_grefs);
+ max_grefs - rinfo->persistent_gnts_c);
return 1;
}
+ }
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &ring_req);
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
if (unlikely(require_extra_req))
rinfo->shadow[extra_id].req = *extra_ring_req;
- if (max_grefs > 0)
+ if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
return 0;
@@ -906,8 +909,8 @@ out_err:
return BLK_STS_IOERR;
out_busy:
- spin_unlock_irqrestore(&rinfo->ring_lock, flags);
blk_mq_stop_hw_queue(hctx);
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_RESOURCE;
}
@@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- blkif_req(req)->error = -EOPNOTSUPP;
+ blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index f6e7491c873c..d509b500a7b5 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -41,8 +41,16 @@ static __init int timer_irq_init(struct device_node *np,
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
struct clock_event_device *clkevt = &to->clkevt;
- of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name):
- irq_of_parse_and_map(np, of_irq->index);
+ if (of_irq->name) {
+ of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
+ if (ret < 0) {
+ pr_err("Failed to get interrupt %s for %s\n",
+ of_irq->name, np->full_name);
+ return ret;
+ }
+ } else {
+ of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
+ }
if (!of_irq->irq) {
pr_err("Failed to map interrupt for %s\n", np->full_name);
return -EINVAL;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 193204dfbf3a..4b75084fabad 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig"
config CRYPTO_DEV_BCM_SPU
tristate "Broadcom symmetric crypto/hash acceleration support"
depends on ARCH_BCM_IPROC
- depends on BCM_PDC_MBOX
+ depends on MAILBOX
default m
select CRYPTO_DES
select CRYPTO_MD5
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index ef04c9748317..bf7ac621c591 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
break;
case HASH_ALG_SHA3_512:
*spu2_type = SPU2_HASH_TYPE_SHA3_512;
+ break;
case HASH_ALG_LAST:
default:
err = -EINVAL;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index ae44a464cd2d..9ccefb9b7232 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -18,8 +18,9 @@
#define SE_GROUP 0
#define DRIVER_VERSION "1.0"
+#define FW_DIR "cavium/"
/* SE microcode */
-#define SE_FW "cnn55xx_se.fw"
+#define SE_FW FW_DIR "cnn55xx_se.fw"
static const char nitrox_driver_name[] = "CNN55XX";
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index e7f87ac12685..1fabd4aee81b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct safexcel_crypto_priv *priv;
- u64 dma_mask;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
- dma_mask = DMA_BIT_MASK(64);
- ret = dma_set_mask_and_coherent(dev, dma_mask);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
goto err_clk;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d..938eb4868f7f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
}
EXPORT_SYMBOL_GPL(dax_write_cache);
+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 57da14c15987..56e0a0e1b600 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
if (WARN_ON(!fence))
return -EINVAL;
- if (!ktime_to_ns(fence->timestamp)) {
- fence->timestamp = ktime_get();
- smp_mb__before_atomic();
- }
-
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
* we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
- } else
+ } else {
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
+ }
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
if (!fence)
return -EINVAL;
- if (!ktime_to_ns(fence->timestamp)) {
- fence->timestamp = ktime_get();
- smp_mb__before_atomic();
- }
-
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 82a6e7f6d37f..59a3b2f8ee91 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
show ? "_" : "",
sync_status_str(status));
- if (status) {
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
struct timespec64 ts64 =
ktime_to_timespec64(fence->timestamp);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 545e2c5c4815..d7e219d2669d 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
+ while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+ !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+ info->timestamp_ns =
+ test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+ ktime_to_ns(fence->timestamp) :
+ ktime_set(0, 0);
}
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index a485864cb512..06432d84cbf8 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -532,7 +532,7 @@ static inline uint32_t fsi_smode_sid(int x)
return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
}
-static const uint32_t fsi_slave_smode(int id)
+static uint32_t fsi_slave_smode(int id)
{
return FSI_SMODE_WSC | FSI_SMODE_ECRC
| fsi_smode_sid(id)
@@ -883,17 +883,16 @@ struct bus_type fsi_bus_type = {
};
EXPORT_SYMBOL_GPL(fsi_bus_type);
-static int fsi_init(void)
+static int __init fsi_init(void)
{
return bus_register(&fsi_bus_type);
}
+postcore_initcall(fsi_init);
static void fsi_exit(void)
{
bus_unregister(&fsi_bus_type);
}
-
-module_init(fsi_init);
module_exit(fsi_exit);
module_param(discard_errors, int, 0664);
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5f8ada1d872b..37971d9402e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
if (adev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = 0xFF00,
- .num_mec = adev->gfx.mec.num_mec,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
};
@@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
* nbits is not compile time constant */
- last_valid_bit = adev->gfx.mec.num_mec
+ last_valid_bit = 1 /* only first MEC can have compute queues */
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f621ee115c98..5e771bc11b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
result = idr_find(&fpriv->bo_list_handles, id);
if (result) {
- if (kref_get_unless_zero(&result->refcount))
+ if (kref_get_unless_zero(&result->refcount)) {
+ rcu_read_unlock();
mutex_lock(&result->lock);
- else
+ } else {
+ rcu_read_unlock();
result = NULL;
+ }
+ } else {
+ rcu_read_unlock();
}
- rcu_read_unlock();
return result;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3a0b69b09ed6..c9b9c88231aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
- if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (se_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
+
+ if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (sh_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- } else {
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- }
+
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 88187bfc5ea3..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
- /* We only use the first MEC */
- if (kfd->shared_resources.num_mec > 1)
- kfd->shared_resources.num_mec = 1;
-
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 955aa304ff48..602769ced3bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
return false;
}
-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
- BUG_ON(!dqm || !dqm->dev);
-
- return dqm->dev->shared_resources.num_mec;
-}
-
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 66b9615bc3c1..faf820a06400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 91ef1484b3bb..36f376677a53 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
- /* number of mec available from the hardware */
- uint32_t num_mec;
-
/* number of pipes per mec */
uint32_t num_pipe_per_mec;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d6f097f44b6c..197174e562d2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2128,15 +2128,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->AvfsGbCksOff.m2_shift = 12;
pp_table->AvfsGbCksOff.b_shift = 0;
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].sclk_offset == 0)
- pp_table->StaticVoltageOffsetVid[i] = 248;
- else
- pp_table->StaticVoltageOffsetVid[i] =
- (uint8_t)(dep_table->entries[i].sclk_offset *
- VOLTAGE_VID_OFFSET_SCALE2 /
- VOLTAGE_VID_OFFSET_SCALE1);
- }
+ for (i = 0; i < dep_table->count; i++)
+ pp_table->StaticVoltageOffsetVid[i] =
+ convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_a) &&
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 213fb837e1c4..08af8d6b844b 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
DP_DETAILED_CAP_INFO_AVAILABLE;
int clk;
int bpc;
- char id[6];
+ char id[7];
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
@@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_puts(m, "\t\tType: N/A\n");
}
+ memset(id, 0, sizeof(id));
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
@@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+ len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
if (len > 0)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
return false;
}
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!recv_hdr.somt && !msg->have_somt)
+ return false;
+
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
- return;
+ return false;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
- return;
+ return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk\n");
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
+ return false;
}
+
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
- if (ret == false)
+ if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
+ return false;
+ }
+
curreply += len;
replylen -= len;
}
+ return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, false);
+ if (!drm_dp_get_one_sb_msg(mgr, false)) {
+ memset(&mgr->down_rep_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, true);
+
+ if (!drm_dp_get_one_sb_msg(mgr, true)) {
+ memset(&mgr->up_req_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
- drm_dp_put_mst_branch_device(mstb);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d185347c64c..305dc3d4ff77 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -75,6 +75,7 @@ config DRM_EXYNOS_DP
config DRM_EXYNOS_HDMI
bool "HDMI"
depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you want to use Exynos HDMI for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 35a8dfc93836..242bd50faa26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
struct component_match *match;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a11b79596e2f..b6a46d9a016e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1651,8 +1651,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
return ret;
dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
- if (!dsi->bridge_node)
- return -EINVAL;
return 0;
}
@@ -1687,9 +1685,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
return ret;
}
- bridge = of_drm_find_bridge(dsi->bridge_node);
- if (bridge)
- drm_bridge_attach(encoder, bridge, NULL);
+ if (dsi->bridge_node) {
+ bridge = of_drm_find_bridge(dsi->bridge_node);
+ if (bridge)
+ drm_bridge_attach(encoder, bridge, NULL);
+ }
return mipi_dsi_host_register(&dsi->dsi_host);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e45720543a45..16bbee897e0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
- int ret;
- mic->bridge.funcs = &mic_bridge_funcs;
- mic->bridge.of_node = dev->of_node;
mic->bridge.driver_private = mic;
- ret = drm_bridge_add(&mic->bridge);
- if (ret)
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- return ret;
+ return 0;
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master,
already_disabled:
mutex_unlock(&mic_mutex);
-
- drm_bridge_remove(&mic->bridge);
}
static const struct component_ops exynos_mic_component_ops = {
@@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
+ mic->bridge.funcs = &mic_bridge_funcs;
+ mic->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&mic->bridge);
+ if (ret) {
+ DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
+ return ret;
+ }
+
pm_runtime_enable(dev);
ret = component_add(dev, &exynos_mic_component_ops);
@@ -479,8 +480,13 @@ err:
static int exynos_mic_remove(struct platform_device *pdev)
{
+ struct exynos_mic *mic = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
+
+ drm_bridge_remove(&mic->bridge);
+
return 0;
}
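
Read together, the exynos_drm_mic hunks move bridge registration out of the
component bind/unbind callbacks (which may run several times) into
probe/remove (which run once per device). A sketch of the resulting
lifecycle with placeholder names, assuming this kernel's drm_bridge_add()
that still returns an error code:

#include <linux/component.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

static const struct drm_bridge_funcs foo_bridge_funcs; /* ops elided */
static const struct component_ops foo_component_ops;   /* ops elided */

struct foo { struct drm_bridge bridge; };

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo;
	int ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;
	platform_set_drvdata(pdev, foo);

	foo->bridge.funcs = &foo_bridge_funcs;
	foo->bridge.of_node = pdev->dev.of_node;
	ret = drm_bridge_add(&foo->bridge);	/* once per device lifetime */
	if (ret)
		return ret;

	return component_add(&pdev->dev, &foo_component_ops);
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo *foo = platform_get_drvdata(pdev);

	component_del(&pdev->dev, &foo_component_ops);
	drm_bridge_remove(&foo->bridge);	/* mirrors probe, not bind */
	return 0;
}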
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 06bfbe400cf1..d3b69d66736f 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1501,8 +1501,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
cancel_delayed_work(&hdata->hotplug_work);
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
-
- hdmiphy_disable(hdata);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1676,7 +1674,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
return hdmi_bridge_init(hdata);
}
-static struct of_device_id hdmi_match_types[] = {
+static const struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos4210-hdmi",
.data = &exynos4210_hdmi_driver_data,
@@ -1934,8 +1932,7 @@ static int hdmi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_hdmi_suspend(struct device *dev)
+static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1944,7 +1941,7 @@ static int exynos_hdmi_suspend(struct device *dev)
return 0;
}
-static int exynos_hdmi_resume(struct device *dev)
+static int __maybe_unused exynos_hdmi_resume(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
int ret;
@@ -1955,7 +1952,6 @@ static int exynos_hdmi_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
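
The #ifdef removal above relies on __maybe_unused: the callbacks are
referenced only from inside SET_RUNTIME_PM_OPS(), which compiles away when
CONFIG_PM is off, and the attribute silences the resulting "defined but not
used" warning without a preprocessor guard that is easy to get wrong. The
generic shape, with hypothetical names:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* power the hardware back up */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_suspend, foo_resume, NULL)
};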
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6bed4f3ffcd6..a998a8dd783c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.atomic_check = mixer_atomic_check,
};
-static struct mixer_drv_data exynos5420_mxr_drv_data = {
+static const struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos5250_mxr_drv_data = {
+static const struct mixer_drv_data exynos5250_mxr_drv_data = {
.version = MXR_VER_16_0_33_0,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos4212_mxr_drv_data = {
+static const struct mixer_drv_data exynos4212_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
};
-static struct mixer_drv_data exynos4210_mxr_drv_data = {
+static const struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
.has_sclk = 1,
};
-static struct of_device_id mixer_match_types[] = {
+static const struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos4210-mixer",
.data = &exynos4210_mxr_drv_data,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 2deb05f618fb..7cb0818a13de 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
- bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
- hrtimer_cancel(&irq->vblank_timer.timer);
-
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- have_enabled_pipe =
- pipe_is_enabled(vgpu, pipe);
- if (have_enabled_pipe)
- break;
+ if (pipe_is_enabled(vgpu, pipe))
+ goto out;
}
}
- if (have_enabled_pipe)
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
+ /* all the pipes are disabled */
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ return;
+
+out:
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 152f16c11878..348b29a845c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
return NOTIFY_DONE;
}
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags)
{
struct clflush *clflush;
@@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!i915_gem_object_has_struct_page(obj)) {
obj->cache_dirty = false;
- return;
+ return false;
}
/* If the GPU is snooping the contents of the CPU cache,
@@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* tracking.
*/
if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
- return;
+ return false;
trace_i915_gem_object_clflush(obj);
@@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
}
obj->cache_dirty = false;
+ return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
index 2455a7820937..f390247561b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,7 @@
struct drm_i915_private;
struct drm_i915_gem_object;
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags);
#define I915_CLFLUSH_FORCE BIT(0)
#define I915_CLFLUSH_SYNC BIT(1)
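
With the bool return in place, callers can tell whether a flush was really
queued. The execbuffer hunk just below uses exactly that; restated in
isolation (a condensed excerpt of the diff, not new API):

	if (obj->cache_dirty && !obj->cache_coherent) {
		/* only a real flush forces the request to wait for it */
		if (i915_gem_clflush_object(obj, 0))
			entry->flags &= ~EXEC_OBJECT_ASYNC;
	}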
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 054b2e54cdaf..e9503f6d1100 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
eb->args->flags |= __EXEC_HAS_RELOC;
}
- entry->flags |= __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma));
-
if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_get_fence(vma);
if (unlikely(err)) {
@@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
}
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma));
+
return 0;
}
@@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs))))
+ if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
return -EFAULT;
do {
@@ -1775,7 +1775,7 @@ out:
}
}
- return err ?: have_copy;
+ return err;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
int err;
for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+ struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct i915_vma *vma = exec_to_vma(entry);
struct drm_i915_gem_object *obj = vma->obj;
@@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
eb->request->capture_list = capture;
}
+ if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
+ if (i915_gem_clflush_object(obj, 0))
+ entry->flags &= ~EXEC_OBJECT_ASYNC;
+ }
+
if (entry->flags & EXEC_OBJECT_ASYNC)
goto skip_flushes;
- if (unlikely(obj->cache_dirty && !obj->cache_coherent))
- i915_gem_clflush_object(obj, 0);
-
err = i915_gem_request_await_object
(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
if (err)
@@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_unlock;
err = eb_relocate(&eb);
- if (err)
+ if (err) {
/*
* If the user expects the execobject.offset and
* reloc.presumed_offset to be an exact match,
@@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* relocation.
*/
args->flags &= ~__EXEC_HAS_RELOC;
- if (err < 0)
goto err_vma;
+ }
if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4a673fc1a432..20cf272c97b1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma)
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma->flags--;
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 80e96f1f49d2..9edeaaef77ad 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val &= ~LOADGEN_SELECT;
- if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
- ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
+ if ((rate <= 600000 && width == 4 && ln >= 1) ||
+ (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dec9e58545a1..9471c88d449e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
-static void intel_update_primary_planes(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
-
- for_each_crtc(dev, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.visible) {
- trace_intel_update_plane(&plane->base,
- to_intel_crtc(crtc));
-
- plane->update_plane(plane,
- to_intel_crtc_state(crtc->state),
- plane_state);
- }
- }
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -3499,6 +3479,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state;
int ret;
+
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
@@ -3512,12 +3498,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
drm_modeset_backoff(ctx);
}
-
- /* reset doesn't touch the display, but flips might get nuked anyway, */
- if (!i915.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
@@ -3547,6 +3527,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
int ret;
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
+ if (!state)
+ goto unlock;
+
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
@@ -3558,22 +3546,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
- if (!state) {
- /*
- * Flips in the rings have been nuked by the reset,
- * so update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * FIXME: Atomic will make this obsolete since we won't schedule
- * CS-based flips (which might get lost in gpu resets) any more.
- */
- intel_update_primary_planes(dev);
- } else {
- ret = __intel_display_resume(dev, state, ctx);
+ /* for testing only: restore the display */
+ ret = __intel_display_resume(dev, state, ctx);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- }
} else {
/*
* The display has been reset as well,
@@ -3597,8 +3573,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
- if (state)
- drm_atomic_state_put(state);
+ drm_atomic_state_put(state);
+unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
@@ -9117,6 +9093,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
u64 power_domain_mask;
bool active;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ intel_crtc_init_scalers(crtc, pipe_config);
+
+ pipe_config->scaler_state.scaler_id = -1;
+ pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+ }
+
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
@@ -9145,13 +9128,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_crtc_init_scalers(crtc, pipe_config);
-
- pipe_config->scaler_state.scaler_id = -1;
- pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
- }
-
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT_ULL(power_domain);
@@ -9540,7 +9516,16 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself.
+ * arm itself. Thus we always start the full update
+ * with a CURCNTR write.
+ *
+ * On other platforms CURPOS always requires the
+ * CURBASE write to arm the update. Additionally
+ * a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
*
* CURCNTR and CUR_FBC_CTL are always
* armed by the CURBASE write only.
@@ -9559,6 +9544,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
plane->cursor.cntl = cntl;
} else {
I915_WRITE_FW(CURPOS(pipe), pos);
+ I915_WRITE_FW(CURBASE(pipe), base);
}
POSTING_READ_FW(CURBASE(pipe));
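
The expanded comment reduces to a two-register contract: CURCNTR opens a
full update, and CURBASE is always the write that arms (commits) whatever
was staged. A loose restatement of the two paths, reusing the register
macros from the diff rather than standalone code, with needs_full_update
standing in for the driver's cached-state comparison:

	if (needs_full_update) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);	/* begin armed update */
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);	/* commit everything */
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);	/* still required to latch CURPOS */
	}
	POSTING_READ_FW(CURBASE(pipe));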
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 52d5b82790d9..c17ed0e62b67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_SKYLAKE(dev_priv))
return true;
- if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ if (IS_KABYLAKE(dev_priv))
return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48ea0fca1f72..40b224b44d1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
- else if ((ddb_allocation && ddb_allocation /
- fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+ else if (ddb_allocation >=
+ fixed_16_16_to_u32_round_up(plane_blocks_per_line))
selected_result = min_fixed_16_16(method1, method2);
else if (latency >= linetime_us)
selected_result = min_fixed_16_16(method1, method2);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 627e2aa09766..8cdec455cf7d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->ring_mask = BIT(0);
i915->engine[RCS] = mock_engine(i915, "mock");
if (!i915->engine[RCS])
- goto err_dependencies;
+ goto err_priorities;
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 49546222c6d3..6276bb834b4f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
- DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 636031a30e17..8aca20209cb8 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
/* port@1 is the output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 147b22163f9f..dab78c660dd6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1158,8 +1158,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
- if (msg->size == 0)
- return msg->size;
ret = nvkm_i2c_aux_acquire(aux);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8d1df5678eaa..f362c9fa8b3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -409,7 +409,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- struct drm_crtc *crtc;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -418,10 +417,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
drm_crtc_force_disable_all(dev);
}
- /* Make sure that drm and hw vblank irqs get properly disabled. */
- drm_for_each_crtc(crtc, dev)
- drm_crtc_vblank_off(crtc);
-
/* disable flip completion events */
nvif_notify_put(&drm->flip);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index e3132a2ce34d..2bc0dc985214 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3674,15 +3674,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
- nv_encoder->i2c = &nv_connector->aux.ddc;
+ if (disp->disp->oclass < GF110_DISP) {
+ /* HW has no support for address-only
+ * transactions, so we're required to
+ * use custom I2C-over-AUX code.
+ */
+ nv_encoder->i2c = &aux->i2c;
+ } else {
+ nv_encoder->i2c = &nv_connector->aux.ddc;
+ }
nv_encoder->aux = aux;
}
/*TODO: Use DP Info Table to check for support. */
- if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ if (disp->disp->oclass >= GF110_DISP) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
nv_connector->base.base.id,
&nv_encoder->dp.mstm);
@@ -3931,6 +3940,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
+ if (crtc_state->active && !asyh->state.active)
+ drm_crtc_vblank_off(crtc);
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -4016,11 +4027,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
nv50_head_flush_set(head, asyh);
interlock_core = 1;
}
- }
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc->state->event)
- drm_crtc_vblank_get(crtc);
+ if (asyh->state.active) {
+ if (!crtc_state->active)
+ drm_crtc_vblank_on(crtc);
+ if (asyh->state.event)
+ drm_crtc_vblank_get(crtc);
+ }
}
/* Update plane(s). */
@@ -4067,12 +4080,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
if (crtc->state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
- drm_accurate_vblank_count(crtc);
+ if (crtc->state->active)
+ drm_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
- drm_crtc_vblank_put(crtc);
+ if (crtc->state->active)
+ drm_crtc_vblank_put(crtc);
}
}
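
The nv50_display changes bracket the vblank machinery with the CRTC's
active state: drm_crtc_vblank_off() before a head is torn down,
drm_crtc_vblank_on() once it is brought back up, and vblank references
taken only while active. The invariant, reduced to a skeleton where
was_active/becomes_active stand in for crtc_state->active and
asyh->state.active:

	if (was_active && !becomes_active)
		drm_crtc_vblank_off(crtc);	/* before disabling the head */

	/* ... program the head ... */

	if (becomes_active) {
		if (!was_active)
			drm_crtc_vblank_on(crtc);	/* after re-enabling */
		if (crtc->state->event)
			drm_crtc_vblank_get(crtc);	/* ref only while active */
	}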
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index a24312fb0228..a1e8bf48b778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -22,6 +22,7 @@ struct nvkm_ior {
unsigned proto_evo:4;
enum nvkm_ior_proto {
CRT,
+ TV,
TMDS,
LVDS,
DP,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 19c635663399..6ea19466f436 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -22,7 +22,7 @@ struct nv50_disp {
u8 type[3];
} pior;
- struct nv50_disp_chan *chan[17];
+ struct nv50_disp_chan *chan[21];
};
void nv50_disp_super_1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 85aff85394ac..be9e7f8c3b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
case 0:
switch (outp->info.type) {
case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
+ case DCB_OUTPUT_TV : *type = DAC; return TV;
case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
case DCB_OUTPUT_DP : *type = SOR; return DP;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c2d21e..6d8f21290aa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
if (bar->bar[0].mem) {
addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 48f01e40b8fc..b768e66a472b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o
nvkm-y += nvkm/subdev/i2c/aux.o
nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgf119.o
nvkm-y += nvkm/subdev/i2c/auxgm200.o
nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index d172e42dd228..4c1f547da463 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -117,6 +117,10 @@ int
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
u32 addr, u8 *data, u8 *size)
{
+ if (!*size && !aux->func->address_only) {
+ AUX_ERR(aux, "address-only transaction dropped");
+ return -ENOSYS;
+ }
return aux->func->xfer(aux, retry, type, addr, data, size);
}
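
An address-only request is an AUX transaction with a zero-byte payload;
I2C-over-AUX uses it to delimit I2C transfers. The guard above rejects such
requests early on hardware that cannot encode them, and the auxg94/auxgm200
hunks below show the encoding on hardware that can, where a dedicated
control bit stands in for "length 0" (sketched with the values from those
hunks):

	ctrl &= ~0x0001f1ff;		/* clear type, length and addr-only bits */
	ctrl |= type << 12;
	if (*size)
		ctrl |= *size - 1;	/* payload of 1..16 bytes -> 0..15 */
	else
		ctrl |= 0x00000100;	/* bit 8: address-only transaction */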
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 27a4a39c87f0..9587ab456d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,7 @@
#include "pad.h"
struct nvkm_i2c_aux_func {
+ bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
@@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
+int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+ int, u8, struct nvkm_i2c_aux **);
+
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
+int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
#define AUX_MSG(b,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index ab8cb196c34e..c8ab1b5741a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
return 0;
}
-static int
+int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
@@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -160,14 +160,10 @@ out:
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
-static const struct nvkm_i2c_aux_func
-g94_i2c_aux_func = {
- .xfer = g94_i2c_aux_xfer,
-};
-
int
-g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
- struct nvkm_i2c_aux **paux)
+g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+ struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
{
struct g94_i2c_aux *aux;
@@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
return -ENOMEM;
*paux = &aux->base;
- nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+ nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux = {
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
new file mode 100644
index 000000000000..dab40cd8fe3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "aux.h"
+
+static const struct nvkm_i2c_aux_func
+gf119_i2c_aux = {
+ .address_only = true,
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ee091fa79628..7ef60895f43a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00d954 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -162,6 +162,7 @@ out:
static const struct nvkm_i2c_aux_func
gm200_i2c_aux_func = {
+ .address_only = true,
.xfer = gm200_i2c_aux_xfer,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
index d53212f1aa52..3bc4d0310076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -28,7 +28,7 @@
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
@@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
};
int
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 699fe7f9b8bf..a2ab6dcdf4a2 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
if (rdev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = 0xFF00,
- .num_mec = 1,
.num_pipe_per_mec = 4,
.num_queue_per_pipe = 8
};
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 50c41c0a50ef..dcc539ba85d6 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -5,6 +5,10 @@ config DRM_ROCKCHIP
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
+ select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
+ select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
Choose this option if you have a Rockchip soc chipset.
This driver provides kernel mode setting and buffer
@@ -12,10 +16,10 @@ config DRM_ROCKCHIP
2D or 3D acceleration; acceleration is performed by other
IP found on the SoC.
+if DRM_ROCKCHIP
+
config ROCKCHIP_ANALOGIX_DP
bool "Rockchip specific extensions for Analogix DP driver"
- depends on DRM_ROCKCHIP
- select DRM_ANALOGIX_DP
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
@@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
- depends on DRM_ROCKCHIP
- depends on EXTCON
- select SND_SOC_HDMI_CODEC if SND_SOC
+ depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable Dp on
@@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
- depends on DRM_ROCKCHIP
- select DRM_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
@@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
- depends on DRM_ROCKCHIP
- select DRM_MIPI_DSI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare MIPI DSI driver. If you want to
@@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
- depends on DRM_ROCKCHIP
help
This selects support for Rockchip SoC specific extensions
for the Innosilicon HDMI driver. If you want to enable
HDMI on RK3036 based SoC, you should select this option.
+
+endif
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..a12cc7ea99b6 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
SCALER_DISPSTATX_EMPTY);
}
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+ if (crtc->state->event) {
+ unsigned long flags;
+
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ vc4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+}
+
static void vc4_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
require_hvs_enabled(dev);
+ /* Enable vblank irq handling before crtc is started otherwise
+ * drm_crtc_vblank_get() fails in vc4_crtc_update_dlist().
+ */
+ drm_crtc_vblank_on(crtc);
+ vc4_crtc_update_dlist(crtc);
+
/* Turn on the scaler, which will wait for vstart to start
* compositing.
*/
@@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
/* Turn on the pixel valve, which will emit the vstart signal. */
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
- /* Enable vblank irq handling after crtc is started. */
- drm_crtc_vblank_on(crtc);
}
static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
bool debug_dump_regs = false;
@@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- if (crtc->state->event) {
- unsigned long flags;
-
- crtc->state->event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- vc4_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
-
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- } else {
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
- }
+ /* Only update DISPLIST if the CRTC was already running and is not
+ * being disabled.
+ * vc4_crtc_enable() takes care of updating the dlist just after
+ * re-enabling VBLANK interrupts and before enabling the engine.
+ * If the CRTC is being disabled, there's no point in updating this
+ * information.
+ */
+ if (crtc->state->active && old_state->active)
+ vc4_crtc_update_dlist(crtc);
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 35bf781e418e..c7056322211c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,49 +30,49 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
.busy_placement = &vram_placement_flags
};
-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
}
};
-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
.busy_placement = &sys_ne_placement_flags
};
-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 99a7f4ab7d97..86178796de6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
if (ret)
return ret;
- header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
- &header->handle);
+ header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+ &header->handle);
if (!header->cb_header) {
ret = -ENOMEM;
goto out_no_cb_header;
@@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
cb_hdr = header->cb_header;
offset = header->node.start << PAGE_SHIFT;
header->cmd = man->map + offset;
- memset(cb_hdr, 0, sizeof(*cb_hdr));
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
return -ENOMEM;
- dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
- &header->handle);
+ dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
+ &header->handle);
if (!dheader)
return -ENOMEM;
@@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
cb_hdr = &dheader->cb_header;
header->cb_header = cb_hdr;
header->cmd = dheader->cmd;
- memset(dheader, 0, sizeof(*dheader));
cb_hdr->status = SVGA_CB_STATUS_NONE;
cb_hdr->flags = SVGA_CB_FLAG_NONE;
cb_hdr->ptr.pa = (u64)header->handle +
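
Both hunks trade a dma_pool_alloc() + memset() pair for dma_pool_zalloc(),
so the allocation comes back already zeroed and a separate clear can
neither be forgotten nor end up placed after fields have been written. The
substitution in general form (hypothetical names):

	hdr = dma_pool_zalloc(pool, GFP_KERNEL, &dma_handle);
	if (!hdr)
		return -ENOMEM;
	/* no memset(hdr, 0, sizeof(*hdr)) needed; start filling fields */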
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..36c7b6c839c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
int ret;
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
- if (unlikely(cres == NULL))
+ if (unlikely(!cres))
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
int ret;
man = kzalloc(sizeof(*man), GFP_KERNEL);
- if (man == NULL)
+ if (!man)
return ERR_PTR(-ENOMEM);
man->dev_priv = dev_priv;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index bcc6d4136c87..4212b3e673bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
- if (unlikely(uctx->cotables[i] == NULL)) {
- ret = -ENOMEM;
+ if (unlikely(IS_ERR(uctx->cotables[i]))) {
+ ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
}
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
+ if (unlikely(!ctx)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 6c026d75c180..d87861bbe971 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
return ERR_PTR(ret);
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
- if (unlikely(vcotbl == NULL)) {
+ if (unlikely(!vcotbl)) {
ret = -ENOMEM;
goto out_no_alloc;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a641555b960..4436d53ae16c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_RENDER_ALLOW),
};
-static struct pci_device_id vmw_pci_id_list[] = {
+static const struct pci_device_id vmw_pci_id_list[] = {
{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
{0, 0, 0}
};
@@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
char host_log[100] = {0};
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (unlikely(dev_priv == NULL)) {
+ if (unlikely(!dev_priv)) {
DRM_ERROR("Failed allocating a device private struct.\n");
return -ENOMEM;
}
@@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
int ret = -ENOMEM;
vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
- if (unlikely(vmw_fp == NULL))
+ if (unlikely(!vmw_fp))
return ret;
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev,
struct vmw_master *vmaster;
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
- if (unlikely(vmaster == NULL))
+ if (unlikely(!vmaster))
return -ENOMEM;
vmw_master_init(vmaster);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c7b53d987f06..2cfb3c93f42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(node == NULL)) {
+ if (unlikely(!node)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
@@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
struct vmw_resource_relocation *rel;
rel = kmalloc(sizeof(*rel), GFP_KERNEL);
- if (unlikely(rel == NULL)) {
+ if (unlikely(!rel)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+ return -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
@@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
/**
* vmw_cmd_dx_ia_set_vertex_buffers - Validate an
- * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command.
+ * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6b2708b4eafe..b8bc5bc7de7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
- if (unlikely(fman == NULL))
+ if (unlikely(!fman))
return NULL;
fman->dev_priv = dev_priv;
@@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL))
+ if (unlikely(!fence))
return -ENOMEM;
ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
return ret;
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
- if (unlikely(ufence == NULL)) {
+ if (unlikely(!ufence)) {
ret = -ENOMEM;
goto out_no_object;
}
@@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_fence_manager *fman = fman_from_fence(fence);
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
- if (unlikely(eaction == NULL))
+ if (unlikely(!eaction))
return -ENOMEM;
eaction->event = event;
@@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
int ret;
event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (unlikely(event == NULL)) {
+ if (unlikely(!event)) {
DRM_ERROR("Failed to allocate an event.\n");
ret = -ENOMEM;
goto out_no_space;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c1900f4390a4..d2b03d4a3c86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
- if (unlikely(gman == NULL))
+ if (unlikely(!gman))
return -ENOMEM;
spin_lock_init(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3d94ea67a825..61e06f0e8cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
+
+ if (plane->fb) {
+ hotspot_x += plane->fb->hot_x;
+ hotspot_y += plane->fb->hot_y;
+ }
+
du->cursor_surface = vps->surf;
du->cursor_dmabuf = vps->dmabuf;
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + hotspot_x,
du->cursor_y + hotspot_y);
+
+ du->core_hotspot_x = hotspot_x - du->hotspot_x;
+ du->core_hotspot_y = hotspot_y - du->hotspot_y;
} else {
DRM_ERROR("Failed to update cursor image\n");
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 941bcfd131ff..b17f08fc50d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
if (dev_priv->has_dx) {
*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
} else {
*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
- if (unlikely(mob == NULL))
+ if (unlikely(!mob))
return NULL;
mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6063c9636d4a..97000996b8dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
- if (reply == NULL) {
+ if (!reply) {
DRM_ERROR("Cannot allocate memory for reply\n");
return -ENOMEM;
}
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
return -ENOMEM;
}
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
msg_len = strlen(log) + strlen("log ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory for log message\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7d591f653dfa..a96f90f017d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(user_bo == NULL)) {
+ if (unlikely(!user_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
}
backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(backup == NULL))
+ if (unlikely(!backup))
return -ENOMEM;
ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 68f135c5b0d8..9b832f136813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
}
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
- if (unlikely(ushader == NULL)) {
+ if (unlikely(!ushader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
}
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
- if (unlikely(shader == NULL)) {
+ if (unlikely(!shader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_shader_size);
ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
/* Allocate and pin a DMA buffer */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (unlikely(buf == NULL))
+ if (unlikely(!buf))
return -ENOMEM;
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 50be1f034f9e..5284e8d2f7ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
* something arbitrarily large and we will reject any layout
* that doesn't fit prim_bb_mem later
*/
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 2c58a390123a..778272514164 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev)
return -ENOMEM;
err = iommu_attach_device(host->domain, &pdev->dev);
- if (err)
+ if (err == -ENODEV) {
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+ goto skip_iommu;
+ } else if (err) {
goto fail_free_domain;
+ }
geometry = &host->domain->geometry;
@@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev)
host->iova_end = geometry->aperture_end;
}
+skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
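
iommu_attach_device() returning -ENODEV means the device has no IOMMU it
can use here, so the probe now degrades to plain DMA instead of failing
outright; any other error is still fatal. Condensed, the fallback is:

	err = iommu_attach_device(host->domain, &pdev->dev);
	if (err == -ENODEV) {
		iommu_domain_free(host->domain);	/* domain won't be used */
		host->domain = NULL;			/* run without IOMMU */
		goto skip_iommu;
	} else if (err) {
		goto fail_free_domain;
	}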
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6fd01a692197..9017dcc14502 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_ORTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
#endif
#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3d911bfd91cf..c9ba4c6db74c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -824,6 +824,7 @@
#define USB_VENDOR_ID_ORTEK 0x05a4
#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
+#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 6620f15fec22..8783a064cdcf 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -5,6 +5,7 @@
*
* Ortek PKB-1700
* Ortek WKB-2000
+ * iHome IMAC-A210S
* Skycable wireless presenter
*
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
@@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
rdesc[55] = 0x92;
} else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
rdesc[53] = 0x65;
}
return rdesc;
@@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id ortek_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ }
};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76013eb5cb7f..c008847e0b20 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data;
int res;
+ set_bit(HID_OPENED, &usbhid->iofl);
+
if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
return 0;
res = usb_autopm_get_interface(usbhid->intf);
/* the device must be awake to reliably request remote wakeup */
- if (res < 0)
+ if (res < 0) {
+ clear_bit(HID_OPENED, &usbhid->iofl);
return -EIO;
+ }
usbhid->intf->needs_remote_wakeup = 1;
set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
- set_bit(HID_OPENED, &usbhid->iofl);
set_bit(HID_IN_POLLING, &usbhid->iofl);
res = hid_start_in(hid);
@@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return;
-
/*
* Make sure we don't restart data acquisition due to
* a resumption we no longer care about by avoiding racing
* with hid_start_in().
*/
spin_lock_irq(&usbhid->lock);
- clear_bit(HID_IN_POLLING, &usbhid->iofl);
clear_bit(HID_OPENED, &usbhid->iofl);
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+ clear_bit(HID_IN_POLLING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
+ return;
+
hid_cancel_delayed_stuff(usbhid);
usb_kill_urb(usbhid->urbin);
usbhid->intf->needs_remote_wakeup = 0;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e9bf0bb87ac4..e57cc40cb768 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -606,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
+ /* re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
return ret;
}
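
The hv fix above pairs the tasklet_disable() done at the start of
vmbus_close_internal() with a tasklet_enable() on the exit path, so a
channel that is closed and later re-opened still gets its callback
tasklet run. A minimal sketch of that pairing, where my_chan and
my_chan_teardown are hypothetical stand-ins:

/* Kernel context: keep disable/enable balanced on every return path
 * so the tasklet can fire again after a re-open.
 */
static int my_chan_close(struct my_chan *chan)
{
	int ret;

	tasklet_disable(&chan->callback_event);	/* quiesce callbacks */
	ret = my_chan_teardown(chan);		/* may fail; enable anyway */
	tasklet_enable(&chan->callback_event);	/* re-arm for re-open */
	return ret;
}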
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0af7fd311979..76c34f4fde13 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -566,6 +566,8 @@ static int applesmc_init_smcreg_try(void)
if (ret)
return ret;
s->fan_count = tmp[0];
+ if (s->fan_count > 10)
+ s->fan_count = 10;
ret = applesmc_get_lower_bound(&s->temp_begin, "T");
if (ret)
@@ -811,7 +813,8 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
char newkey[5];
u8 buffer[2];
- sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+ scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+ to_index(attr));
ret = applesmc_read_key(newkey, buffer, 2);
speed = ((buffer[0] << 8 | buffer[1]) >> 2);
@@ -834,7 +837,8 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
return -EINVAL; /* Bigger than a 14-bit value */
- sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+ scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+ to_index(attr));
buffer[0] = (speed >> 6) & 0xff;
buffer[1] = (speed << 2) & 0xff;
@@ -903,7 +907,7 @@ static ssize_t applesmc_show_fan_position(struct device *dev,
char newkey[5];
u8 buffer[17];
- sprintf(newkey, FAN_ID_FMT, to_index(attr));
+ scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr));
ret = applesmc_read_key(newkey, buffer, 16);
buffer[16] = 0;
@@ -1116,7 +1120,8 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
}
for (i = 0; i < num; i++) {
node = &grp->nodes[i];
- sprintf(node->name, grp->format, i + 1);
+ scnprintf(node->name, sizeof(node->name), grp->format,
+ i + 1);
node->sda.index = (grp->option << 16) | (i & 0xffff);
node->sda.dev_attr.show = grp->show;
node->sda.dev_attr.store = grp->store;
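
The applesmc changes above replace unbounded sprintf() into 5-byte SMC
key buffers with length-checked formatting, and clamp fan_count so the
fan index stays single-digit. A userspace sketch of why the bound
matters, with snprintf standing in for the kernel's scnprintf
(FAN_ID_FMT is the driver's real format string):

#include <stdio.h>

#define FAN_ID_FMT "F%dID"	/* "F0ID".."F9ID": 4 chars + NUL fit char[5] */

int main(void)
{
	char newkey[5];
	int n = snprintf(newkey, sizeof(newkey), FAN_ID_FMT, 12);

	/* a two-digit index needs 6 bytes; n >= 5 signals truncation
	 * instead of the overflow sprintf() would have caused
	 */
	printf("key=\"%s\", needed %d bytes\n", newkey, n + 1);
	return 0;
}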
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 0e05f75934c9..1858e3ce3993 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -104,19 +104,19 @@ u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
-#define EZ(v, unit) ((v) ? ENOUGH(v, unit) : 0)
+#define EZ(v, unit) ((v) ? ENOUGH((v) * 1000, unit) : 0)
static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
int T, int UT)
{
- q->setup = EZ(t->setup * 1000, T);
- q->act8b = EZ(t->act8b * 1000, T);
- q->rec8b = EZ(t->rec8b * 1000, T);
- q->cyc8b = EZ(t->cyc8b * 1000, T);
- q->active = EZ(t->active * 1000, T);
- q->recover = EZ(t->recover * 1000, T);
- q->cycle = EZ(t->cycle * 1000, T);
- q->udma = EZ(t->udma * 1000, UT);
+ q->setup = EZ(t->setup, T);
+ q->act8b = EZ(t->act8b, T);
+ q->rec8b = EZ(t->rec8b, T);
+ q->cyc8b = EZ(t->cyc8b, T);
+ q->active = EZ(t->active, T);
+ q->recover = EZ(t->recover, T);
+ q->cycle = EZ(t->cycle, T);
+ q->udma = EZ(t->udma, UT);
}
void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
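
In the ide-timings hunk above, ENOUGH(v, unit) is integer ceiling
division (ceil(v / unit) for v > 0) and EZ() maps 0 to 0 so an unset
field imposes no requirement; the patch folds the nanosecond scaling
(* 1000) into EZ itself so every field is quantized the same way. A
userspace check of the arithmetic, assuming T is the clock period in
picoseconds as the scaling suggests:

#include <assert.h>
#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1) /* ceil(v/unit), v > 0 */
#define EZ(v, unit)	((v) ? ENOUGH((v) * 1000, unit) : 0)

int main(void)
{
	assert(ENOUGH(240, 80) == 3);	/* exact multiple */
	assert(ENOUGH(241, 80) == 4);	/* always rounds up */
	assert(EZ(0, 80) == 0);		/* unset field stays zero */

	/* 120 ns setup time on a 30000 ps (30 ns) clock -> 4 cycles */
	printf("setup = %d cycles\n", EZ(120, 30000));
	return 0;
}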
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 11aff923b633..0eb393237ba2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1033,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
+ qp_attr->port_num = id_priv->id.port_num;
+ *qp_attr_mask |= IB_QP_PORT;
} else
ret = -ENOSYS;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3f55d18a3791..2c98533a0203 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_cq *cq;
struct ib_ucq_object *obj;
- struct ib_uverbs_event_queue *ev_queue;
int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
*/
uverbs_uobject_get(uobj);
cq = uobj->object;
- ev_queue = cq->cq_context;
obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
memset(&resp, 0, sizeof(resp));
@@ -1935,7 +1933,8 @@ static int modify_qp(struct ib_uverbs_file *file,
goto out;
}
- if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+ if ((cmd->base.attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
ret = -EINVAL;
goto release_qp;
}
@@ -2088,7 +2087,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_qp cmd;
struct ib_uverbs_destroy_qp_resp resp;
struct ib_uobject *uobj;
- struct ib_qp *qp;
struct ib_uqp_object *obj;
int ret = -EINVAL;
@@ -2102,7 +2100,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- qp = uobj->object;
obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
/*
* Make sure we don't free the memory in remove_commit as we still
@@ -3004,7 +3001,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
{
struct ib_uverbs_ex_destroy_wq cmd = {};
struct ib_uverbs_ex_destroy_wq_resp resp = {};
- struct ib_wq *wq;
struct ib_uobject *uobj;
struct ib_uwq_object *obj;
size_t required_cmd_sz;
@@ -3038,7 +3034,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- wq = uobj->object;
obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
/*
* Make sure we don't free the memory in remove_commit as we still
@@ -3728,10 +3723,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_srq cmd;
struct ib_uverbs_destroy_srq_resp resp;
struct ib_uobject *uobj;
- struct ib_srq *srq;
struct ib_uevent_object *obj;
int ret = -EINVAL;
- enum ib_srq_type srq_type;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
@@ -3741,9 +3734,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- srq = uobj->object;
obj = container_of(uobj, struct ib_uevent_object, uobject);
- srq_type = srq->srq_type;
/*
* Make sure we don't free the memory in remove_commit as we still
* needs the uobject memory to create the response.
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 08772836fded..85527532c49d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -51,6 +51,8 @@
#define BNXT_RE_PAGE_SIZE_8M BIT(23)
#define BNXT_RE_PAGE_SIZE_1G BIT(30)
+#define BNXT_RE_MAX_MR_SIZE BIT(30)
+
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
@@ -60,6 +62,13 @@
#define BNXT_RE_RQ_WQE_THRESHOLD 32
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260ms (4 usec * 2^(timeout))
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY 16
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
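
The comment above uses the IB encoding in which local_ca_ack_delay is
an exponent: the timeout is roughly 4 usec * 2^value (the spec figure
is 4.096 usec). A quick check of the arithmetic for the new default
of 16:

#include <stdio.h>

int main(void)
{
	unsigned int value = 16;
	unsigned long long usec = 4ULL << value; /* 4 * 2^16 = 262144 us */

	printf("ack timeout ~%llu us (~%llu ms)\n", usec, usec / 1000);
	return 0;
}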
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c7bd68311d0c..f0e01b3ac711 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
bnxt_qplib_get_guid(rdev->netdev->dev_addr,
(u8 *)&ib_attr->sys_image_guid);
- ib_attr->max_mr_size = ~0ull;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
- BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
- BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+ ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_mr = dev_attr->max_mr;
ib_attr->max_pd = dev_attr->max_pd;
ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
- ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
- ib_attr->atomic_cap = IB_ATOMIC_HCA;
- ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+ ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+ if (dev_attr->is_atomic) {
+ ib_attr->atomic_cap = IB_ATOMIC_HCA;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+ }
ib_attr->max_ee_rd_atom = 0;
ib_attr->max_res_rd_atom = 0;
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
ib_attr->max_pkeys = 1;
- ib_attr->local_ca_ack_delay = 0;
+ ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
return 0;
}
@@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
return -EINVAL;
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid
- (sgid_tbl,
- &sgid_tbl->tbl[ctx->idx], true);
- if (rc)
+ rc = bnxt_qplib_del_sgid(sgid_tbl,
+ &sgid_tbl->tbl[ctx->idx],
+ true);
+ if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
- ctx_tbl = sgid_tbl->ctx;
- ctx_tbl[ctx->idx] = NULL;
- kfree(ctx);
+ } else {
+ ctx_tbl = sgid_tbl->ctx;
+ ctx_tbl[ctx->idx] = NULL;
+ kfree(ctx);
+ }
}
} else {
return -EINVAL;
@@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
/* Create a fence MW only for kernel consumers */
mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
- if (!mw) {
+ if (IS_ERR(mw)) {
dev_err(rdev_to_dev(rdev),
"Failed to create fence-MW for PD: %p\n", pd);
- rc = -EINVAL;
+ rc = PTR_ERR(mw);
goto fail;
}
fence->mw = mw;
@@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
int rc;
bnxt_re_destroy_fence_mr(pd);
- if (ib_pd->uobject && pd->dpi.dbr) {
- struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
- struct bnxt_re_ucontext *ucntx;
- /* Free DPI only if this is the first PD allocated by the
- * application and mark the context dpi as NULL
- */
- ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
- rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
- &rdev->qplib_res.dpi_tbl,
- &pd->dpi);
+ if (pd->qplib_pd.id) {
+ rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+ &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
- /* Don't fail, continue*/
- ucntx->dpi = NULL;
- }
-
- rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
- &rdev->qplib_res.pd_tbl,
- &pd->qplib_pd);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
- return rc;
+ dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
}
kfree(pd);
@@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
if (udata) {
struct bnxt_re_pd_resp resp;
- if (!ucntx->dpi) {
+ if (!ucntx->dpi.dbr) {
/* Allocate DPI in alloc_pd to avoid failing of
* ibv_devinfo and family of application when DPIs
* are depleted.
*/
if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
- &pd->dpi, ucntx)) {
+ &ucntx->dpi, ucntx)) {
rc = -ENOMEM;
goto dbfail;
}
- ucntx->dpi = &pd->dpi;
}
resp.pdid = pd->qplib_pd.id;
/* Still allow mapping this DBR to the new user PD. */
- resp.dpi = ucntx->dpi->dpi;
- resp.dbr = (u64)ucntx->dpi->umdbr;
+ resp.dpi = ucntx->dpi.dpi;
+ resp.dbr = (u64)ucntx->dpi.umdbr;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
@@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->rq.nmap = umem->nmap;
}
- qplib_qp->dpi = cntx->dpi;
+ qplib_qp->dpi = &cntx->dpi;
return 0;
rqfail:
ib_umem_release(qp->sumem);
@@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
- qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+ /* Cap the max_rd_atomic to device max */
+ qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+ dev_attr->max_qp_rd_atom);
}
if (qp_attr_mask & IB_QP_SQ_PSN) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
qp->qplib_qp.sq.psn = qp_attr->sq_psn;
}
if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (qp_attr->max_dest_rd_atomic >
+ dev_attr->max_qp_init_rd_atom) {
+ dev_err(rdev_to_dev(rdev),
+ "max_dest_rd_atomic requested%d is > dev_max%d",
+ qp_attr->max_dest_rd_atomic,
+ dev_attr->max_qp_init_rd_atom);
+ return -EINVAL;
+ }
+
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
@@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
}
cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
cq->qplib_cq.nmap = cq->umem->nmap;
- cq->qplib_cq.dpi = uctx->dpi;
+ cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
budget = min_t(u32, num_entries, cq->max_cql);
+ num_entries = budget;
if (!cq->cql) {
dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
goto exit;
@@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
else if (ib_cqn_flags & IB_CQ_SOLICITED)
type = DBR_DBR_TYPE_CQ_ARMSE;
+ /* Poll to see if there are missed events */
+ if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+ return 1;
+
bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
return 0;
@@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct scatterlist *sg;
int entry;
+ if (length > BNXT_RE_MAX_MR_SIZE) {
+ dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+ length, BNXT_RE_MAX_MR_SIZE);
+ return ERR_PTR(-ENOMEM);
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
struct bnxt_re_ucontext,
ib_uctx);
+
+ struct bnxt_re_dev *rdev = uctx->rdev;
+ int rc = 0;
+
if (uctx->shpg)
free_page((unsigned long)uctx->shpg);
+
+ if (uctx->dpi.dbr) {
+ /* Free the DPI that was allocated for this context's first PD
+ * and mark the context dpi as NULL
+ */
+ rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->qplib_res.dpi_tbl,
+ &uctx->dpi);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+ /* Don't fail, continue */
+ uctx->dpi.dbr = NULL;
+ }
+
kfree(uctx);
return 0;
}
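
Returning 1 from req_notify_cq when IB_CQ_REPORT_MISSED_EVENTS is set
and the CQ is non-empty is what lets consumers close the poll/arm race.
A kernel-context sketch of the standard loop that relies on it (not
this driver's code, just the usual verbs pattern):

static void drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			;	/* handle the completion in wc here */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}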
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..a0bb7e33d7ca 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
- struct bnxt_qplib_dpi dpi;
struct bnxt_re_fence_data fence;
};
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
struct bnxt_re_ucontext {
struct bnxt_re_dev *rdev;
struct ib_ucontext ib_uctx;
- struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_dpi dpi;
void *shpg;
spinlock_t sh_lock; /* protect shpg */
};
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 1fce5e73216b..ceae2d92fb08 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f05500bcdcf1..9af1514e5944 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1128,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
/* Each SGE entry = 1 WQE size16 */
wqe_size16 = wqe->num_sge;
+ /* HW requires the wqe size to have room for at least one SGE
+ * even if none was supplied by the ULP
+ */
+ if (!wqe->num_sge)
+ wqe_size16++;
}
/* Specifics */
@@ -1364,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
rqe->flags = wqe->flags;
rqe->wqe_size = wqe->num_sge +
((offsetof(typeof(*rqe), data) + 15) >> 4);
+ /* HW requires the wqe size to have room for at least one SGE
+ * even if none was supplied by the ULP
+ */
+ if (!wqe->num_sge)
+ rqe->wqe_size++;
/* Supply the rqe->wr_id index to the wr_id_tbl for now */
rqe->wr_id[0] = cpu_to_le32(sw_prod);
@@ -1885,6 +1895,25 @@ flush_rq:
return rc;
}
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+ struct cq_base *hw_cqe, **hw_cqe_ptr;
+ unsigned long flags;
+ u32 sw_cons, raw_cons;
+ bool rc = true;
+
+ spin_lock_irqsave(&cq->hwq.lock, flags);
+ raw_cons = cq->hwq.cons;
+ sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+ hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+ hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+
+ /* Check for Valid bit. If the CQE is valid, return false */
+ rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+ spin_unlock_irqrestore(&cq->hwq.lock, flags);
+ return rc;
+}
+
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
struct cq_res_raweth_qp1 *hwcqe,
struct bnxt_qplib_cqe **pcqe,
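
bnxt_qplib_is_cq_empty() above peeks at the consumer slot and tests the
CQE's valid bit without consuming anything, the usual phase-tag trick
for rings that expose no shared producer index. A userspace sketch of
the idea with a hypothetical entry layout (NVMe-style phase bit):

#include <stdbool.h>
#include <stdint.h>

struct cqe { uint32_t flags; };	/* bit 0: valid/phase tag */

static bool ring_empty(const struct cqe *ring, uint32_t cons, uint32_t size)
{
	bool expected = !((cons / size) & 1); /* starts at 1, flips per wrap */

	/* entry is new only when its tag matches the expected phase */
	return (ring[cons % size].flags & 1) != expected;
}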
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 36b7b7db0e3f..19176e06c98a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -449,6 +449,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index fde18cf0e406..ef91ab786dd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 } };
/* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+ int rc;
+ u16 pcie_ctl2;
+
+ rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
+ &pcie_ctl2);
+ if (rc)
+ return false;
+ return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr)
{
@@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
+ /* The max_qp value the FW reports for the PF doesn't include QP1 */
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
+ attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a543f959098b..2ce7e2a32cf0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -42,6 +42,8 @@
#define BNXT_QPLIB_RESERVED_QP_WRS 128
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
+
struct bnxt_qplib_dev_attr {
char fw_ver[32];
u16 max_sgid;
@@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr {
u32 max_inline_data;
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+ bool is_atomic;
};
struct bnxt_qplib_pd {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 29d30744d6c9..0cd0c1fa27d4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
struct iwch_mr *mhp;
u32 mmid;
u32 stag = 0;
- int ret = 0;
+ int ret = -ENOMEM;
if (mr_type != IB_MR_TYPE_MEM_REG ||
max_num_sg > T3_MAX_FASTREG_DEPTH)
@@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
goto err;
mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mhp->pages) {
- ret = -ENOMEM;
+ if (!mhp->pages)
goto pl_err;
- }
mhp->rhp = rhp;
ret = iwch_alloc_pbl(mhp, max_num_sg);
@@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+ ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ if (ret)
goto err3;
pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e16fcaf6b5a3..be07da1997e6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
goto err3;
if (ucontext) {
+ ret = -ENOMEM;
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm)
goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bfc77596acbe..cb7fc0d35d1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
if (wr->num_sge > 1)
return -EINVAL;
- if (wr->num_sge) {
+ if (wr->num_sge && wr->sg_list[0].length) {
wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
>> 32));
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index da2eb5a281fa..9b1566468744 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
void i40iw_rem_devusecount(struct i40iw_device *iwdev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 6ae98aa7f74e..5a2fa743676c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
(original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
(last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
- (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+ (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
+ iwdev->reset)) {
issue_close = 1;
iwqp->cm_id = NULL;
if (!iwqp->flush_issued) {
@@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
attr.qp_state = IB_QPS_ERR;
i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (iwdev->reset)
+ i40iw_cm_disconn(cm_node->iwqp);
i40iw_rem_ref_cm_node(cm_node);
}
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index a027e2072477..9ec1ae9a82c9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1970,6 +1970,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
}
+ cqp->process_cqp_sds = i40iw_update_sds_noccq;
+
return ret_code;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e0f47cc2effc..ae8463ff59a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
if (free_hwcqp)
dev->cqp_ops->cqp_destroy(dev->cqp);
+ i40iw_cleanup_pending_cqp_op(iwdev);
+
i40iw_free_dma_mem(dev->hw, &cqp->sq);
kfree(cqp->scratch_array);
iwdev->cqp.scratch_array = NULL;
@@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
/**
* i40iw_destroy_aeq - destroy aeq
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Issue a destroy aeq request and
* free the resources associated with the aeq
* The function is called during driver unload
*/
-static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
enum i40iw_status_code status = I40IW_ERR_NOT_READY;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
if (!iwdev->msix_shared)
i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
- if (reset)
+ if (iwdev->reset)
goto exit;
if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
@@ -304,19 +305,17 @@ exit:
* i40iw_destroy_ceq - destroy ceq
* @iwdev: iwarp device
* @iwceq: ceq to be destroyed
- * @reset: true if called before reset
*
* Issue a destroy ceq request and
* free the resources associated with the ceq
*/
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
- struct i40iw_ceq *iwceq,
- bool reset)
+ struct i40iw_ceq *iwceq)
{
enum i40iw_status_code status;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- if (reset)
+ if (iwdev->reset)
goto exit;
status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
@@ -335,12 +334,11 @@ exit:
/**
* i40iw_dele_ceqs - destroy all ceq's
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Go through all of the device ceq's and for each ceq
* disable the ceq interrupt and destroy the ceq
*/
-static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
u32 i = 0;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
if (iwdev->msix_shared) {
i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
- i40iw_destroy_ceq(iwdev, iwceq, reset);
+ i40iw_destroy_ceq(iwdev, iwceq);
iwceq++;
i++;
}
for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
- i40iw_destroy_ceq(iwdev, iwceq, reset);
+ i40iw_destroy_ceq(iwdev, iwceq);
}
}
/**
* i40iw_destroy_ccq - destroy control cq
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Issue destroy ccq request and
* free the resources associated with the ccq
*/
-static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_ccq *ccq = &iwdev->ccq;
enum i40iw_status_code status = 0;
- if (!reset)
+ if (!iwdev->reset)
status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
if (status)
i40iw_pr_err("ccq destroy failed %d\n", status);
@@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
iwceq->msix_idx = msix_vec->idx;
status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
if (status) {
- i40iw_destroy_ceq(iwdev, iwceq, false);
+ i40iw_destroy_ceq(iwdev, iwceq);
break;
}
i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
@@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
status = i40iw_configure_aeq_vector(iwdev);
if (status) {
- i40iw_destroy_aeq(iwdev, false);
+ i40iw_destroy_aeq(iwdev);
return status;
}
@@ -1442,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
/**
* i40iw_deinit_device - clean up the device resources
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
* destroy the device queues and free the pble and the hmc objects
*/
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
struct i40e_info *ldev = iwdev->ldev;
@@ -1464,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
i40iw_destroy_rdma_device(iwdev->iwibdev);
/* fallthrough */
case IP_ADDR_REGISTERED:
- if (!reset)
+ if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */
case INET_NOTIFIER:
@@ -1474,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
/* fallthrough */
+ case PBLE_CHUNK_MEM:
+ i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ /* fallthrough */
case CEQ_CREATED:
- i40iw_dele_ceqs(iwdev, reset);
+ i40iw_dele_ceqs(iwdev);
/* fallthrough */
case AEQ_CREATED:
- i40iw_destroy_aeq(iwdev, reset);
+ i40iw_destroy_aeq(iwdev);
/* fallthrough */
case IEQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
/* fallthrough */
case ILQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
/* fallthrough */
case CCQ_CREATED:
- i40iw_destroy_ccq(iwdev, reset);
- /* fallthrough */
- case PBLE_CHUNK_MEM:
- i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ i40iw_destroy_ccq(iwdev);
/* fallthrough */
case HMC_OBJS_CREATED:
- i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+ i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
/* fallthrough */
case CQP_CREATED:
i40iw_destroy_cqp(iwdev, true);
@@ -1670,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
if (status)
break;
+ iwdev->init_state = PBLE_CHUNK_MEM;
iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
i40iw_register_notifiers();
iwdev->init_state = INET_NOTIFIER;
@@ -1693,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
} while (0);
i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
- i40iw_deinit_device(iwdev, false);
+ i40iw_deinit_device(iwdev);
return -ERESTART;
}
@@ -1774,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
iwdev = &hdl->device;
iwdev->closing = true;
+ if (reset)
+ iwdev->reset = true;
+
i40iw_cm_disconnect_all(iwdev);
destroy_workqueue(iwdev->virtchnl_wq);
- i40iw_deinit_device(iwdev, reset);
+ i40iw_deinit_device(iwdev);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index db41ab40da9c..71050c5d29a0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
set_64bit_val(wqe, 0, info->paddr);
set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
set_64bit_val(wqe, 16, header[0]);
+
+ /* Ensure all data is written before writing valid bit */
+ wmb();
set_64bit_val(wqe, 24, header[1]);
i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
@@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
if (!list_empty(rxlist)) {
tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
- plist = &tmpbuf->list;
while ((struct list_head *)tmpbuf != rxlist) {
if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
break;
+ plist = &tmpbuf->list;
tmpbuf = (struct i40iw_puda_buf *)plist->next;
}
/* Insert buf before tmpbuf */
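
The wmb() added in i40iw_puda_send() enforces the usual descriptor
ordering rule: the device may consume a WQE the moment it observes the
valid bit, so all payload stores must be visible first. A kernel-context
sketch of the barrier placement, with a hypothetical four-word wqe
layout (post_wqe is a stand-in):

static void post_wqe(u64 *wqe, u64 addr, u64 len, u64 hdr_valid)
{
	wqe[0] = addr;
	wqe[1] = len;
	wqe[2] = 0;

	wmb();			/* payload before the valid bit */
	wqe[3] = hdr_valid;	/* word carrying the valid bit */
}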
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 56d986924a4c..e311ec559f4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
*/
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
+ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
unsigned long flags;
if (cqp_request->dynamic) {
@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
spin_unlock_irqrestore(&cqp->req_lock, flags);
}
+ wake_up(&iwdev->close_wq);
}
/**
@@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
}
/**
+ * i40iw_free_pending_cqp_request - free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
+ struct i40iw_cqp_request *cqp_request)
+{
+ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+
+ if (cqp_request->waiting) {
+ cqp_request->compl_info.error = true;
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ }
+ i40iw_put_cqp_request(cqp, cqp_request);
+ wait_event_timeout(iwdev->close_wq,
+ !atomic_read(&cqp_request->refcount),
+ 1000);
+}
+
+/**
+ * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
+ * @iwdev: iwarp device
+ */
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request = NULL;
+ struct cqp_commands_info *pcmdinfo = NULL;
+ u32 i, pending_work, wqe_idx;
+
+ pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
+ wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
+ for (i = 0; i < pending_work; i++) {
+ cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
+ if (cqp_request)
+ i40iw_free_pending_cqp_request(cqp, cqp_request);
+ wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
+ }
+
+ while (!list_empty(&dev->cqp_cmd_head)) {
+ pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+ cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
+ if (cqp_request)
+ i40iw_free_pending_cqp_request(cqp, cqp_request);
+ }
+}
+
+/**
* i40iw_free_qp - callback after destroy cqp completes
* @cqp_request: cqp request for destroy qp
* @num: not used
@@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.qp_destroy.remove_hash_idx = true;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Destroy QP fail");
+ if (!status)
+ return;
+
+ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ i40iw_rem_devusecount(iwdev);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4dbe61ec7a77..02d871db7ca5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
u32 qp_num)
{
+ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+ if (iwpbl->pbl_allocated)
+ i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
kfree(iwqp->kqp.wrid_mem);
@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
struct i40iw_qp_init_info *init_info)
{
- struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
iwqp->page = qpmr->sq_page;
@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
ucontext = to_ucontext(ibpd->uobject->context);
if (req.user_wqe_buffers) {
+ struct i40iw_pbl *iwpbl;
+
spin_lock_irqsave(
&ucontext->qp_reg_mem_list_lock, flags);
- iwqp->iwpbl = i40iw_get_pbl(
+ iwpbl = i40iw_get_pbl(
(unsigned long)req.user_wqe_buffers,
&ucontext->qp_reg_mem_list);
spin_unlock_irqrestore(
&ucontext->qp_reg_mem_list_lock, flags);
- if (!iwqp->iwpbl) {
+ if (!iwpbl) {
err_code = -ENODATA;
i40iw_pr_err("no pbl info\n");
goto error;
}
+ memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
}
}
err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
@@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
memset(&req, 0, sizeof(req));
iwcq->user_mode = true;
ucontext = to_ucontext(context);
- if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
+ if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+ err_code = -EFAULT;
goto cq_free_resources;
+ }
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
@@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
ucontext = to_ucontext(ibpd->uobject->context);
i40iw_del_memlist(iwmr, ucontext);
}
- if (iwpbl->pbl_allocated)
+ if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
kfree(iwmr);
return 0;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 07c3fec77de6..9067443cd311 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -170,7 +170,7 @@ struct i40iw_qp {
struct i40iw_qp_kmode kqp;
struct i40iw_dma_mem host_ctx;
struct timer_list terminate_timer;
- struct i40iw_pbl *iwpbl;
+ struct i40iw_pbl iwpbl;
struct i40iw_dma_mem q2_ctx_mem;
struct i40iw_dma_mem ietf_mem;
struct completion sq_drained;
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 1e6c526450d9..fedaf8260105 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
sl_cm_id = get_local_comm_id(mad);
+ id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+ if (id)
+ goto cont;
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
if (IS_ERR(id)) {
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
@@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
return -EINVAL;
}
+cont:
set_local_comm_id(mad, id->pv_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8ab2f1360a45..2c40a2e989d2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -835,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
access_flags, 0);
err = PTR_ERR_OR_ZERO(*umem);
if (err < 0) {
- mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+ mlx5_ib_err(dev, "umem get failed (%d)\n", err);
return err;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 2f30bda8457a..27d5e8d9f08d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -744,7 +744,8 @@ err:
if (is_uctx_pd) {
ocrdma_release_ucontext_pd(uctx);
} else {
- status = _ocrdma_dealloc_pd(dev, pd);
+ if (_ocrdma_dealloc_pd(dev, pd))
+ pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
}
exit:
return ERR_PTR(status);
@@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
goto err;
if (udata == NULL) {
+ status = -ENOMEM;
srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
GFP_KERNEL);
if (srq->rqe_wr_id_tbl == NULL)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 548e4d1e998f..2ae71b8f1ba8 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -53,6 +53,14 @@
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+ size_t len)
+{
+ size_t min_len = min_t(size_t, len, udata->outlen);
+
+ return ib_copy_to_udata(udata, src, min_len);
+}
+
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
if (index > QEDR_ROCE_PKEY_TABLE_LEN)
@@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
uresp.max_cqes = QEDR_MAX_CQES;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
goto err;
@@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
uresp.pd_id = pd_id;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
@@ -729,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
uresp.icid = cq->icid;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
@@ -1238,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp.qp_id = qp->qp_id;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
DP_ERR(dev,
"create qp: failed a copy to user space with qp icid=0x%x.\n",
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 459865439a0b..8876ee7bc326 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1258,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_TIMEOUT) {
qp->timeout = attr->timeout;
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
+ qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
}
if (attr_mask & IB_QP_QKEY)
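
The open-coded expression being replaced here converts the 5-bit IB
timeout exponent to jiffies: the timeout is 4.096 usec * 2^t, computed
as 4096 * 2^t nanoseconds divided by 1000. Assuming
rvt_timeout_to_jiffies() wraps exactly this, a userspace check:

#include <stdio.h>

int main(void)
{
	unsigned int t = 14;	/* a typical RC timeout value */
	unsigned long usec = (4096UL * (1UL << t)) / 1000UL;

	printf("t=%u -> %lu us (~%lu ms)\n", t, usec, usec / 1000);
	return 0;
}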
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index be944d5aa9af..a958ee918a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1219,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
kfree_skb(skb);
}
+ if (notify)
+ return;
+
while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
advance_consumer(qp->rq.queue);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 07511718d98d..af90a7d42b96 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -914,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_unlock_irqrestore(&rq->producer_lock, flags);
+ if (qp->resp.state == QP_STATE_ERROR)
+ rxe_run_task(&qp->resp.task, 1);
+
err1:
return err;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 70dacaf9044e..4ce315c92b48 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2239,6 +2239,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto register_failed;
}
+ result = -ENOMEM;
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 12ed62ce9ff7..2a07692007bd 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
- hdr->write_stag = cpu_to_be32(mem_reg->rkey);
- hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ if (buf_out->data_len > imm_sz) {
+ hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ }
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index dad85e74c37c..3aae015469a5 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -71,7 +71,7 @@ static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
static int __init digicolor_of_init(struct device_node *node,
struct device_node *parent)
{
- static void __iomem *reg_base;
+ void __iomem *reg_base;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct regmap *ucregs;
int ret;
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index 54c296401525..18d58d2b4ffe 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -43,7 +43,7 @@ static const struct of_device_id syscon_pldset_of_match[] = {
static int __init
realview_gic_of_init(struct device_node *node, struct device_node *parent)
{
- static struct regmap *map;
+ struct regmap *map;
struct device_node *np;
const struct of_device_id *gic_id;
u32 pld1_ctrl;
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0a8ed1c05518..14461cbfab2f 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -154,7 +154,7 @@ asmlinkage void __weak plat_irq_dispatch(void)
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
- static struct irq_chip *chip;
+ struct irq_chip *chip;
if (hw < 2 && cpu_has_mipsmt) {
/* Software interrupts are used for MT/CMT IPI */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 832ebf4062f7..6ab1d3afec02 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -950,7 +950,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
- gic_irq_domain->name = "mips-gic-irq";
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -959,7 +958,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (!gic_ipi_domain)
panic("Failed to add GIC IPI domain");
- gic_ipi_domain->name = "mips-gic-ipi";
irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
if (node &&
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 5ecc154f6831..9bc32578a766 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -657,7 +657,7 @@ try:
* be directed to disk.
*/
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter)
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
* filled with data from the cache). If part of the data resides on the
* media, we will read later on
*/
- if (unlikely(!bio->bi_iter.bi_idx))
+ if (unlikely(!advanced_bio))
bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
data = bio_data(bio);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 4e5c48f3de62..d682e89e6493 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,7 +26,7 @@
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa,
- int bio_iter)
+ int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
- return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter);
+ return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
+ bio_iter, advanced_bio);
}
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
sector_t blba = pblk_get_lba(bio);
int nr_secs = rqd->nr_ppas;
- int advanced_bio = 0;
+ bool advanced_bio = false;
int i, j = 0;
/* logic error: lba out-of-bounds. Ignore read request */
@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap));
- continue;
+
+ if (unlikely(!advanced_bio)) {
+ bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
+ advanced_bio = true;
+ }
+
+ goto next;
}
/* Try to read from write buffer. The address is later checked
* on the write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(p)) {
- if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, p, i,
+ advanced_bio)) {
pblk_lookup_l2p_seq(pblk, &p, lba, 1);
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
- advanced_bio = 1;
+ advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
@@ -83,6 +91,7 @@ retry:
rqd->ppa_list[j++] = p;
}
+next:
if (advanced_bio)
bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
@@ -282,7 +291,7 @@ retry:
* write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
- if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry;
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 0c5692cc2f60..67e623bd5c2d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
struct list_head *list,
unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter);
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 850ff6c67994..44f4a8ac95bd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
*/
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
- blk_status_t a;
- int f;
+ int a, f;
unsigned long buffers_processed = 0;
struct dm_buffer *b, *tmp;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1b224aa9cf15..3acce09bba35 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1587,16 +1587,18 @@ retry:
if (likely(ic->mode == 'J')) {
if (dio->write) {
unsigned next_entry, i, pos;
- unsigned ws, we;
+ unsigned ws, we, range_sectors;
- dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
+ dio->range.n_sectors = min(dio->range.n_sectors,
+ ic->free_sectors << ic->sb->log2_sectors_per_block);
if (unlikely(!dio->range.n_sectors))
goto sleep;
- ic->free_sectors -= dio->range.n_sectors;
+ range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
+ ic->free_sectors -= range_sectors;
journal_section = ic->free_section;
journal_entry = ic->free_section_entry;
- next_entry = ic->free_section_entry + dio->range.n_sectors;
+ next_entry = ic->free_section_entry + range_sectors;
ic->free_section_entry = next_entry % ic->journal_section_entries;
ic->free_section += next_entry / ic->journal_section_entries;
ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
@@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
wraparound_section(ic, &ic->free_section);
ic->n_uncommitted_sections++;
}
+ WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+ (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
}
static void integrity_commit(struct work_struct *w)
@@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
{
unsigned i, j, n;
struct journal_completion comp;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
comp.ic = ic;
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
@@ -1945,6 +1952,8 @@ skip_io:
dm_bufio_write_dirty_buffers_async(ic->bufio);
+ blk_finish_plug(&plug);
+
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
@@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Block size doesn't match the information in superblock";
goto bad;
}
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
+ ti->error = "Corrupted superblock, journal_sections is 0";
+ goto bad;
+ }
/* make sure that ti->max_io_len doesn't overflow */
if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
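
The blk_start_plug()/blk_finish_plug() pair added to do_journal_write()
holds bios submitted in between on a per-task list and dispatches them
together, batching the journal I/O. A kernel-context sketch of the
pattern (submit_one_journal_bio is a hypothetical stand-in):

static void write_journal_batch(struct bio **bios, int n)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < n; i++)
		submit_one_journal_bio(bios[i]);
	blk_finish_plug(&plug);
}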
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2e10c2f13a34..5bfe285ea9d1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -208,6 +208,7 @@ struct raid_dev {
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
+#define RT_FLAG_RS_SUSPENDED 5
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout)
if (__raid10_near_copies(layout) > 1)
return "near";
- WARN_ON(__raid10_far_copies(layout) < 2);
+ if (__raid10_far_copies(layout) > 1)
+ return "far";
- return "far";
+ return "unknown";
}
/* Return md raid10 algorithm for @name */
@@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (!freshest)
return 0;
- if (validate_raid_redundancy(rs)) {
- rs->ti->error = "Insufficient redundancy to activate array";
- return -EINVAL;
- }
-
/*
* Validation of the freshest device provides the source of
* validation for the remaining devices.
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (super_validate(rs, freshest))
return -EINVAL;
+ if (validate_raid_redundancy(rs)) {
+ rs->ti->error = "Insufficient redundancy to activate array";
+ return -EINVAL;
+ }
+
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
rdev != freshest &&
@@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
mddev_suspend(&rs->md);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
if (rs_is_raid456(rs)) {
@@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- if (!rs->md.suspended)
+ if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_suspend(&rs->md);
rs->md.ro = 1;
@@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs)
return r;
/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
/*
@@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs)
}
/* Suspend because a resume will happen in raid_resume() */
- if (!mddev->suspended)
- mddev_suspend(mddev);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
+ mddev_suspend(mddev);
/*
* Now reshape got set up, update superblocks to
@@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti)
if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 11, 1},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
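
The RT_FLAG_RS_SUSPENDED changes stop peeking at md's internal
mddev->suspended and track suspension in a flag the target owns. Because
test_and_set_bit()/test_and_clear_bit() are atomic, suspend and resume
become idempotent: only the caller that actually flips the bit makes the
md call. A sketch of the pattern; the wrapper names are hypothetical:

    static void rs_suspend(struct raid_set *rs)
    {
            if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
                    mddev_suspend(&rs->md);         /* first caller only */
    }

    static void rs_resume(struct raid_set *rs)
    {
            if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
                    mddev_resume(&rs->md);  /* resume only what we suspended */
    }
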
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a..28a4071cdf85 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
+#include <linux/dax.h>
#define DM_MSG_PREFIX "table"
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
return false;
}
+static int device_dax_write_cache_enabled(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct dax_device *dax_dev = dev->dax_dev;
+
+ if (!dax_dev)
+ return false;
+
+ if (dax_write_cache_enabled(dax_dev))
+ return true;
+ return false;
+}
+
+static int dm_table_supports_dax_write_cache(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti,
+ device_dax_write_cache_enabled, NULL))
+ return true;
+ }
+
+ return false;
+}
+
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax_write_cache(t))
+ dax_write_cache(t->md->dax_dev, true);
+
/* Ensure that all underlying devices are non-rotational. */
if (dm_table_all_devices_attribute(t, device_is_nonrot))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 504ba3fa328b..e13f90832b6b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
unsigned n;
- if (!fio->rs) {
- fio->rs = mempool_alloc(v->fec->rs_pool, 0);
- if (unlikely(!fio->rs)) {
- DMERR("failed to allocate RS");
- return -ENOMEM;
- }
- }
+ if (!fio->rs)
+ fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
fec_for_each_prealloc_buffer(n) {
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
if (unlikely(!fio->bufs[n])) {
DMERR("failed to allocate FEC buffer");
return -ENOMEM;
@@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
/* we can manage with even one buffer if necessary */
if (unlikely(!fio->bufs[n]))
break;
}
fio->nbufs = n;
- if (!fio->output) {
+ if (!fio->output)
fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
- if (!fio->output) {
- DMERR("failed to allocate FEC page");
- return -ENOMEM;
- }
- }
-
return 0;
}
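
The dm-verity-fec hunks lean on the mempool contract: mempool_alloc() with
a mask that allows direct reclaim (GFP_NOIO here) sleeps until an element
is available and never returns NULL, so the removed error branches were
dead code; GFP_NOWAIT may fail, and those call sites keep a degraded-mode
fallback. A sketch of the two contracts:

    #include <linux/mempool.h>

    static void *grab_required(mempool_t *pool)
    {
            return mempool_alloc(pool, GFP_NOIO);   /* sleeps, never NULL */
    }

    static void *grab_optional(mempool_t *pool)
    {
            return mempool_alloc(pool, GFP_NOWAIT); /* may be NULL */
    }
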
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 884ff7c170a0..a4fa2ada6883 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
goto out;
}
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
}
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
return -ENOMEM;
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Get zone information from disk */
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
- &blkz, &nr_blkz, GFP_KERNEL);
+ &blkz, &nr_blkz, GFP_NOIO);
if (ret) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
ret = blkdev_reset_zones(dev->bdev,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_KERNEL);
+ dev->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
dmz_id(zmd, zone), ret);
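
Every GFP_KERNEL to GFP_NOIO switch above closes the same hazard: a
GFP_KERNEL allocation in the metadata I/O path may enter reclaim, which
can issue I/O back to the device being managed and deadlock. An
alternative to annotating each call site (not what this patch does) is
scoping with memalloc_noio_save(), which makes nested allocations behave
as GFP_NOIO for the whole section. A sketch, with a hypothetical flush
helper:

    #include <linux/sched/mm.h>

    static int do_metadata_io(struct dmz_metadata *zmd)
    {
            unsigned int noio_flags = memalloc_noio_save();
            int ret = flush_dirty_blocks(zmd);      /* hypothetical */

            memalloc_noio_restore(noio_flags);
            return ret;
    }
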
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 05c0a126f5c8..44a119e12f1a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
nr_blocks = block - wp_block;
ret = blkdev_issue_zeroout(zrc->dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
- dmz_blk2sect(nr_blocks), GFP_NOFS, false);
+ dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
if (ret) {
dmz_dev_err(zrc->dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 2b538fa817f4..b08bbbd4d902 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
int ret;
/* Create a new chunk work */
- cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS);
+ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
if (!cw)
goto out;
@@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = dev->bdev;
- if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE))
+ if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
/* The BIO should be block aligned */
@@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
- if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) {
+ if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
spin_lock(&dmz->flush_lock);
bio_list_add(&dmz->flush_list, bio);
spin_unlock(&dmz->flush_lock);
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS);
+ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cdca0296749..c99634612fc4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev)
static bool set_in_sync(struct mddev *mddev)
{
- WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
+ WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
if (!mddev->in_sync) {
mddev->sync_checkers++;
spin_unlock(&mddev->lock);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b50eb4ac1b82..09db03455801 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -731,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
!bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
-
-/* Maximum size of each resync request */
-#define RESYNC_BLOCK_SIZE (64*1024)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-
-/* for managing resync I/O pages */
-struct resync_pages {
- unsigned idx; /* for get/put page from the pool */
- void *raid_bio;
- struct page *pages[RESYNC_PAGES];
-};
-
-static inline int resync_alloc_pages(struct resync_pages *rp,
- gfp_t gfp_flags)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++) {
- rp->pages[i] = alloc_page(gfp_flags);
- if (!rp->pages[i])
- goto out_free;
- }
-
- return 0;
-
-out_free:
- while (--i >= 0)
- put_page(rp->pages[i]);
- return -ENOMEM;
-}
-
-static inline void resync_free_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- put_page(rp->pages[i]);
-}
-
-static inline void resync_get_all_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- get_page(rp->pages[i]);
-}
-
-static inline struct page *resync_fetch_page(struct resync_pages *rp,
- unsigned idx)
-{
- if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
- return NULL;
- return rp->pages[idx];
-}
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
new file mode 100644
index 000000000000..9f2670b45f31
--- /dev/null
+++ b/drivers/md/raid1-10.c
@@ -0,0 +1,81 @@
+/* Maximum size of each resync request */
+#define RESYNC_BLOCK_SIZE (64*1024)
+#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+
+/* for managing resync I/O pages */
+struct resync_pages {
+ void *raid_bio;
+ struct page *pages[RESYNC_PAGES];
+};
+
+static inline int resync_alloc_pages(struct resync_pages *rp,
+ gfp_t gfp_flags)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++) {
+ rp->pages[i] = alloc_page(gfp_flags);
+ if (!rp->pages[i])
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (--i >= 0)
+ put_page(rp->pages[i]);
+ return -ENOMEM;
+}
+
+static inline void resync_free_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ put_page(rp->pages[i]);
+}
+
+static inline void resync_get_all_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ get_page(rp->pages[i]);
+}
+
+static inline struct page *resync_fetch_page(struct resync_pages *rp,
+ unsigned idx)
+{
+ if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+ return NULL;
+ return rp->pages[idx];
+}
+
+/*
+ * 'struct resync_pages' stores the actual pages used for doing the resync
+ * IO, and it is per-bio, so make .bi_private point to it.
+ */
+static inline struct resync_pages *get_resync_pages(struct bio *bio)
+{
+ return bio->bi_private;
+}
+
+/* generally called after bio_reset() for resetting the bvec table */
+static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+ int size)
+{
+ int idx = 0;
+
+ /* initialize bvec table again */
+ do {
+ struct page *page = resync_fetch_page(rp, idx);
+ int len = min_t(int, size, PAGE_SIZE);
+
+ /*
+ * won't fail because the vec table is big
+ * enough to hold all these pages
+ */
+ bio_add_page(bio, page, len, 0);
+ size -= len;
+ } while (idx++ < RESYNC_PAGES && size > 0);
+}
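
raid1-10.c is deliberately a .c file textually included by raid1.c and
raid10.c below rather than a separately linked object, so each user gets
its own copy of the static helpers. The intended calling sequence for the
new reset helper, sketched from how the raid1 hunks use it (bio_reset()
clears bi_private, so it has to be restored before the bvec table is
rebuilt):

    static void reinit_resync_bio(struct bio *bio, int size)
    {
            struct resync_pages *rp = get_resync_pages(bio);

            bio_reset(bio);
            bio->bi_private = rp;   /* bio_reset() cleared it */
            md_bio_reset_resync_pages(bio, rp, size);
    }
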
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..f50958ded9f0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#define raid1_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r1_bio;
bio->bi_private = rp;
}
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- /* we release behind master bio when all write are done */
- if (r1_bio->behind_master_bio == bio)
- to_put = NULL;
-
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
struct bio *bio)
{
int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
if (!behind_bio)
- goto fail;
+ return;
/* discard op, we don't support writezero/writesame yet */
- if (!bio_has_data(bio))
+ if (!bio_has_data(bio)) {
+ behind_bio->bi_iter.bi_size = size;
goto skip_copy;
+ }
while (i < vcnt && size) {
struct page *page;
@@ -1123,14 +1112,13 @@ skip_copy:
r1_bio->behind_master_bio = behind_bio;
set_bit(R1BIO_BehindIO, &r1_bio->state);
- return behind_bio;
+ return;
free_pages:
pr_debug("%dB behind alloc failed, doing sync I/O\n",
bio->bi_iter.bi_size);
bio_free_pages(behind_bio);
-fail:
- return behind_bio;
+ bio_put(behind_bio);
}
struct raid1_plug_cb {
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
!waitqueue_active(&bitmap->behind_wait)) {
- mbio = alloc_behind_master_bio(r1_bio, bio);
+ alloc_behind_master_bio(r1_bio, bio);
}
bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
first_clone = 0;
}
- if (!mbio) {
- if (r1_bio->behind_master_bio)
- mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO,
- mddev->bio_set);
- else
- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
- }
+ if (r1_bio->behind_master_bio)
+ mbio = bio_clone_fast(r1_bio->behind_master_bio,
+ GFP_NOIO, mddev->bio_set);
+ else
+ mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
if (r1_bio->behind_master_bio) {
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
/* Fix variable parts of all bios */
vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
for (i = 0; i < conf->raid_disks * 2; i++) {
- int j;
- int size;
blk_status_t status;
- struct bio_vec *bi;
struct bio *b = r1_bio->bios[i];
struct resync_pages *rp = get_resync_pages(b);
if (b->bi_end_io != end_sync_read)
@@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio)
status = b->bi_status;
bio_reset(b);
b->bi_status = status;
- b->bi_vcnt = vcnt;
- b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio)
rp->raid_bio = r1_bio;
b->bi_private = rp;
- size = b->bi_iter.bi_size;
- bio_for_each_segment_all(bi, b, j) {
- bi->bv_offset = 0;
- if (size > PAGE_SIZE)
- bi->bv_len = PAGE_SIZE;
- else
- bi->bv_len = size;
- size -= PAGE_SIZE;
- }
+ /* initialize bvec table again */
+ md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
}
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio = bio_clone_fast(r1_bio->behind_master_bio,
GFP_NOIO,
mddev->bio_set);
- /* We really need a _all clone */
- wbio->bi_iter = (struct bvec_iter){ 0 };
} else {
wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
int good_sectors = RESYNC_SECTORS;
int min_bad = 0; /* number of sectors that are bad in all devices */
int idx = sector_to_idx(sector_nr);
+ int page_idx = 0;
if (!conf->r1buf_pool)
if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
rp = get_resync_pages(bio);
if (bio->bi_end_io) {
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
nr_sectors += len>>9;
sector_nr += len>>9;
sync_blocks -= (len>>9);
- } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r1_bio->sectors = nr_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..f55d4cc085f6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf);
#define raid10_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r10bio pointer can be retrieved from the per-bio
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r10_bio;
bio->bi_private = rp;
if (rbio) {
@@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
rp = get_resync_pages(tbio);
bio_reset(tbio);
- tbio->bi_vcnt = vcnt;
- tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
+ md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
+
rp->raid_bio = r10_bio;
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
@@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
sector_t chunk_mask = conf->geo.chunk_mask;
+ int page_idx = 0;
if (!conf->r10buf_pool)
if (init_resync(conf))
@@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
break;
for (bio= biolist ; bio ; bio=bio->bi_next) {
struct resync_pages *rp = get_resync_pages(bio);
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big enough
* to hold all these pages
@@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
nr_sectors += len>>9;
sector_nr += len>>9;
- } while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r10_bio->sectors = nr_sectors;
while (biolist) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index aeeb8d6854e2..0fc2748aaf95 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
if (bitmap_end)
@@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = bi2;
}
@@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
}
@@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+
+ async_tx_issue_pending_all();
blk_finish_plug(&plug);
pr_debug("--- raid5worker inactive\n");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a9dfb26972f2..250dc6ec4c82 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,7 +2957,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
}
/* find out number of slots supported */
- if (device_property_read_u32(dev, "num-slots", &pdata->num_slots))
+ if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
dev_info(dev, "'num-slots' was deprecated.\n");
if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7c12f3715676..04ff3c97a535 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -356,9 +356,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
struct mmc_host *mmc = host->mmc;
int ret = 0;
- if (mmc_pdata(host)->set_power)
- return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
-
/*
* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
@@ -366,9 +363,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- if (mmc_pdata(host)->before_set_reg)
- mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
-
ret = omap_hsmmc_set_pbias(host, false, 0);
if (ret)
return ret;
@@ -400,9 +394,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
return ret;
}
- if (mmc_pdata(host)->after_set_reg)
- mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
-
return 0;
err_set_voltage:
@@ -469,8 +460,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
int ret;
struct mmc_host *mmc = host->mmc;
- if (mmc_pdata(host)->set_power)
- return 0;
ret = mmc_regulator_get_supply(mmc);
if (ret == -EPROBE_DEFER)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d6fa2214aaae..0fb4e4c119e1 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
}
mmc_writel(host, REG_CLKCR, rval);
- if (host->cfg->needs_new_timings)
- mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
+ if (host->cfg->needs_new_timings) {
+ /* Don't touch the delay bits */
+ rval = mmc_readl(host, REG_SD_NTSR);
+ rval |= SDXC_2X_TIMING_MODE;
+ mmc_writel(host, REG_SD_NTSR, rval);
+ }
ret = sunxi_mmc_clk_set_phase(host, ios, rate);
if (ret)
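
The sunxi-mmc change turns a blind register write into read-modify-write
so the delay-line bits in SD_NTSR survive a timing-mode update. The
general idiom over generic MMIO accessors, as a sketch:

    #include <linux/io.h>

    static inline void rmw_set_bits(void __iomem *reg, u32 bits)
    {
            u32 val = readl(reg);

            writel(val | bits, reg);        /* unrelated fields preserved */
    }
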
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 7c754a0f14bb..19e4e904c9bf 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -2,20 +2,11 @@
# Multiplexer devices
#
-menuconfig MULTIPLEXER
- tristate "Multiplexer subsystem"
- help
- Multiplexer controller subsystem. Multiplexers are used in a
- variety of settings, and this subsystem abstracts their use
- so that the rest of the kernel sees a common interface. When
- multiple parallel multiplexers are controlled by one single
- multiplexer controller, this subsystem also coordinates the
- multiplexer accesses.
-
- To compile the subsystem as a module, choose M here: the module will
- be called mux-core.
+config MULTIPLEXER
+ tristate
-if MULTIPLEXER
+menu "Multiplexer drivers"
+ depends on MULTIPLEXER
config MUX_ADG792A
tristate "Analog Devices ADG792A/ADG792G Multiplexers"
@@ -56,4 +47,4 @@ config MUX_MMIO
To compile the driver as a module, choose M here: the module will
be called mux-mmio.
-endif
+endmenu
diff --git a/drivers/mux/mux-core.c b/drivers/mux/mux-core.c
index 90b8995f07cb..2fe96c470112 100644
--- a/drivers/mux/mux-core.c
+++ b/drivers/mux/mux-core.c
@@ -46,7 +46,7 @@ static int __init mux_init(void)
static void __exit mux_exit(void)
{
- class_register(&mux_class);
+ class_unregister(&mux_class);
ida_destroy(&mux_ida);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 181839d6fbea..9bee6c1c70cc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2050,6 +2050,7 @@ static int bond_miimon_inspect(struct bonding *bond)
continue;
bond_propose_link_state(slave, BOND_LINK_FAIL);
+ commit++;
slave->delay = bond->params.downdelay;
if (slave->delay) {
netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2088,6 +2089,7 @@ static int bond_miimon_inspect(struct bonding *bond)
continue;
bond_propose_link_state(slave, BOND_LINK_BACK);
+ commit++;
slave->delay = bond->params.updelay;
if (slave->delay) {
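
bond_miimon_inspect() is the propose half of a propose/commit pair: it
returns the number of proposed per-slave link-state changes, and the
monitor only runs the commit pass when that count is nonzero. Without the
added commit++ lines, BOND_LINK_FAIL/BOND_LINK_BACK proposals with a zero
delay were never committed. The essential shape, with hypothetical
helpers:

    static int inspect_slaves(struct bonding *bond)
    {
            int commit = 0;

            if (slave_link_went_down(bond)) {       /* hypothetical */
                    propose_link_state(BOND_LINK_FAIL);     /* hypothetical */
                    commit++;       /* or the commit phase never runs */
            }
            return commit;
    }
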
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 041cfb7952f8..e94159507847 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
mac_mode |= HALF_DUPLEX;
if (gigabit) {
- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_is_rgmii(dev->phydev))
mac_mode |= RGMII_MODE;
mac_mode |= GMAC_MODE;
@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
break;
case PHY_INTERFACE_MODE_RGMII:
- pad_mode = PAD_MODE_RGMII;
- break;
-
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+ pad_mode = PAD_MODE_RGMII;
break;
default:
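
phy_interface_is_rgmii() matches all four RGMII variants, so the nb8800
MAC now enables RGMII mode for the internal-delay flavors too, and the
tangox pad setup stops adding its own GTX clock delay (in the *_ID modes
the delay is the PHY's job). At the time of this merge the helper is a
range check over consecutive enum values, roughly:

    /* sketch of the include/linux/phy.h helper's logic */
    static inline bool is_rgmii_mode(phy_interface_t mode)
    {
            return mode >= PHY_INTERFACE_MODE_RGMII &&
                   mode <= PHY_INTERFACE_MODE_RGMII_TXID;
    }
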
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 7b0b399aaedd..a981c4ee9d72 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3669,7 +3669,7 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev);
+ bcmgenet_mii_config(priv->dev, false);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 4775999ee016..b1fdd3cc10d1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -699,7 +699,7 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 33d3f60ac74b..18f5723be2c9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -186,7 +186,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_fixed_phy_link_update);
}
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -275,7 +275,8 @@ int bcmgenet_mii_config(struct net_device *dev)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- dev_info_once(kdev, "configuring instance for %s\n", phy_name);
+ if (init)
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
@@ -323,7 +324,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev);
+ ret = bcmgenet_mii_config(dev, true);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 79112563a25a..5e5c4d7796b8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -292,11 +292,30 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
u64 cmr_cfg;
u64 port_cfg = 0;
u64 misc_ctl = 0;
+ bool tx_en, rx_en;
cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
- cmr_cfg &= ~CMR_EN;
+ tx_en = cmr_cfg & CMR_PKT_TX_EN;
+ rx_en = cmr_cfg & CMR_PKT_RX_EN;
+ cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+ GMI_PORT_CFG_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
+ bgx->bgx_id, lmac->lmacid);
+ return;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+ GMI_PORT_CFG_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
+ bgx->bgx_id, lmac->lmacid);
+ return;
+ }
+
port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
@@ -347,10 +366,8 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
- port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
-
- /* Re-enable lmac */
- cmr_cfg |= CMR_EN;
+ /* Restore CMR config settings */
+ cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 6b7fe6fdd13b..23acdc5ab896 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -170,6 +170,8 @@
#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
+#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 95bf5e89cfd1..34dae51effd4 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -125,7 +125,7 @@ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
priv->base + FTGMAC100_OFFSET_MACCR);
- for (i = 0; i < 50; i++) {
+ for (i = 0; i < 200; i++) {
unsigned int maccr;
maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
@@ -392,7 +392,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
struct net_device *netdev = priv->netdev;
struct sk_buff *skb;
dma_addr_t map;
- int err;
+ int err = 0;
skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
if (unlikely(!skb)) {
@@ -428,7 +428,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
else
rxdes->rxdes0 = 0;
- return 0;
+ return err;
}
static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
@@ -1682,6 +1682,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
+ priv->mii_bus->parent = priv->dev;
priv->mii_bus->priv = priv->netdev;
priv->mii_bus->read = ftgmac100_mdiobus_read;
priv->mii_bus->write = ftgmac100_mdiobus_write;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5794d98d946f..9c94ea9b2b80 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2734,7 +2734,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
ppd.shared = pdev;
memset(&res, 0, sizeof(res));
- if (!of_irq_to_resource(pnp, 0, &res)) {
+ if (of_irq_to_resource(pnp, 0, &res) <= 0) {
dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
return -EINVAL;
}
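
of_irq_to_resource() returns the mapped IRQ number on success and zero or
a negative errno on failure, so the old !of_irq_to_resource(...) test only
caught the zero case; checking <= 0 covers both. Sketch:

    #include <linux/of_irq.h>

    static int port_irq(struct device_node *pnp, struct resource *res)
    {
            int irq = of_irq_to_resource(pnp, 0, res);

            return irq <= 0 ? -EINVAL : irq;        /* <= 0: no usable IRQ */
    }
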
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7e95cf547ff1..f1886e1bdd82 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -22,6 +22,7 @@
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
+#include <linux/interrupt.h>
#include "mtk_eth_soc.h"
@@ -947,6 +948,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
RX_DMA_FPORT_MASK;
mac--;
+ if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+ !eth->netdev[mac]))
+ goto release_desc;
+
netdev = eth->netdev[mac];
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f5a2c605749f..31cbe5e86a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work)
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_msg *msg);
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work)
struct semaphore *sem;
unsigned long flags;
bool poll_cmd = ent->polling;
+ int alloc_ret;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
- ent->idx = alloc_ent(cmd);
- if (ent->idx < 0) {
+ alloc_ret = alloc_ent(cmd);
+ if (alloc_ret < 0) {
mlx5_core_err(dev, "failed to allocate command entry\n");
+ if (ent->callback) {
+ ent->callback(-EAGAIN, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ free_cmd(ent);
+ } else {
+ ent->ret = -EAGAIN;
+ complete(&ent->done);
+ }
up(sem);
return;
}
+ ent->idx = alloc_ret;
} else {
ent->idx = cmd->max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
@@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
- goto out_free;
+ goto out;
ds = ent->ts2 - ent->ts1;
op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
ent->idx);
free_ent(cmd, ent->idx);
+ free_cmd(ent);
}
continue;
}
@@ -1488,7 +1504,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
free_msg(dev, ent->in);
err = err ? err : ent->status;
- free_cmd(ent);
+ if (!forced)
+ free_cmd(ent);
callback(err, context);
} else {
complete(&ent->done);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e1b7ddfecd01..0039b4725405 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -266,6 +266,14 @@ struct mlx5e_dcbx {
};
#endif
+#define MAX_PIN_NUM 8
+struct mlx5e_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ struct work_struct out_work;
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+};
+
struct mlx5e_tstamp {
rwlock_t lock;
struct cyclecounter cycles;
@@ -277,7 +285,7 @@ struct mlx5e_tstamp {
struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
- u8 *pps_pin_caps;
+ struct mlx5e_pps pps_info;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 66f432385dbb..84dd63e74041 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -53,6 +53,15 @@ enum {
MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};
+enum {
+ MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
+ MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
+ MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
+ MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
+ MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
+ MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+};
+
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
struct skb_shared_hwtstamps *hwts)
{
@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
}
+static void mlx5e_pps_out(struct work_struct *work)
+{
+ struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
+ out_work);
+ struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
+ pps_info);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+ u64 tstart;
+
+ write_lock_irqsave(&tstamp->lock, flags);
+ tstart = tstamp->pps_info.start[i];
+ tstamp->pps_info.start[i] = 0;
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ if (!tstart)
+ continue;
+
+ MLX5_SET(mtpps_reg, in, pin, i);
+ MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+ MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
+ mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
+ }
+}
+
static void mlx5e_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
overflow_work);
+ struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
unsigned long flags;
write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
write_unlock_irqrestore(&tstamp->lock, flags);
- schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+ queue_delayed_work(priv->wq, &tstamp->overflow_work,
+ msecs_to_jiffies(tstamp->overflow_period * 1000));
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
-
- if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
- /* For future use need to add a loop for finding all 1PPS out pins */
- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
- MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
-
- mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- }
if (delta < 0) {
neg_adj = 1;
@@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u32 field_select = 0;
+ u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- if (!MLX5_CAP_GEN(priv->mdev, pps) ||
- !MLX5_CAP_GEN(priv->mdev, pps_modify))
+ if (!MLX5_PPS_CAP(priv->mdev))
return -EOPNOTSUPP;
if (rq->extts.index >= tstamp->ptp_info.n_pins)
@@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
return -EBUSY;
+ pin_mode = MLX5E_PIN_MODE_IN;
+ pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+ field_select = MLX5E_MTPPS_FS_PIN_MODE |
+ MLX5E_MTPPS_FS_PATTERN |
+ MLX5E_MTPPS_FS_ENABLE;
+ } else {
+ pin = rq->extts.index;
+ field_select = MLX5E_MTPPS_FS_ENABLE;
}
- if (rq->extts.flags & PTP_FALLING_EDGE)
- pattern = 1;
-
MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
if (err)
@@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u64 nsec_now, nsec_delta, time_stamp;
+ u64 nsec_now, nsec_delta, time_stamp = 0;
u64 cycles_now, cycles_delta;
struct timespec64 ts;
unsigned long flags;
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
int pin = -1;
+ int err = 0;
s64 ns;
- if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+ if (!MLX5_PPS_CAP(priv->mdev))
return -EOPNOTSUPP;
if (rq->perout.index >= tstamp->ptp_info.n_pins)
@@ -313,32 +350,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
rq->perout.index);
if (pin < 0)
return -EBUSY;
- }
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
- if (on)
+ pin_mode = MLX5E_PIN_MODE_OUT;
+ pattern = MLX5E_OUT_PATTERN_PERIODIC;
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+
if ((ns >> 1) != 500000000LL)
return -EINVAL;
- ts.tv_sec = rq->perout.start.sec;
- ts.tv_nsec = rq->perout.start.nsec;
- ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- write_unlock_irqrestore(&tstamp->lock, flags);
- time_stamp = cycles_now + cycles_delta;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ ns = timespec64_to_ns(&ts);
+ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+ write_lock_irqsave(&tstamp->lock, flags);
+ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+ tstamp->cycles.mult);
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ time_stamp = cycles_now + cycles_delta;
+ field_select = MLX5E_MTPPS_FS_PIN_MODE |
+ MLX5E_MTPPS_FS_PATTERN |
+ MLX5E_MTPPS_FS_ENABLE |
+ MLX5E_MTPPS_FS_TIME_STAMP;
+ } else {
+ pin = rq->perout.index;
+ field_select = MLX5E_MTPPS_FS_ENABLE;
+ }
+
MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
- MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
MLX5_SET(mtpps_reg, in, enable, on);
MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ if (err)
+ return err;
- return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ return mlx5_set_mtppse(priv->mdev, pin, 0,
+ MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5e_tstamp *tstamp =
+ container_of(ptp, struct mlx5e_tstamp, ptp_info);
+
+ tstamp->pps_info.enabled = !!on;
+ return 0;
}
static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
@@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
return mlx5e_extts_configure(ptp, rq, on);
case PTP_CLK_REQ_PEROUT:
return mlx5e_perout_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return mlx5e_pps_configure(ptp, rq, on);
default:
return -EOPNOTSUPP;
}
@@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
return -ENOMEM;
tstamp->ptp_info.enable = mlx5e_ptp_enable;
tstamp->ptp_info.verify = mlx5e_ptp_verify;
+ tstamp->ptp_info.pps = 1;
for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
snprintf(tstamp->ptp_info.pin_config[i].name,
@@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
- tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
- tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
- tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
- tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
- tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
- tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
- tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
- tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+ tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+ tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+ tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+ tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+ tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+ tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+ tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+ tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5e_tstamp *tstamp = &priv->tstamp;
+ struct timespec64 ts;
+ u64 nsec_now, nsec_delta;
+ u64 cycles_now, cycles_delta;
+ int pin = event->index;
+ s64 ns;
+ unsigned long flags;
- ptp_clock_event(tstamp->ptp, event);
+ switch (tstamp->ptp_info.pin_config[pin].func) {
+ case PTP_PF_EXTTS:
+ if (tstamp->pps_info.enabled) {
+ event->type = PTP_CLOCK_PPSUSR;
+ event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
+ } else {
+ event->type = PTP_CLOCK_EXTTS;
+ }
+ ptp_clock_event(tstamp->ptp, event);
+ break;
+ case PTP_PF_PEROUT:
+ mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
+ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+ ts.tv_sec += 1;
+ ts.tv_nsec = 0;
+ ns = timespec64_to_ns(&ts);
+ write_lock_irqsave(&tstamp->lock, flags);
+ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+ tstamp->cycles.mult);
+ tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
+ queue_work(priv->wq, &tstamp->pps_info.out_work);
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ break;
+ default:
+ netdev_err(netdev, "%s: Unhandled event\n", __func__);
+ }
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
do_div(ns, NSEC_PER_SEC / 2 / HZ);
tstamp->overflow_period = ns;
+ INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
if (tstamp->overflow_period)
- schedule_delayed_work(&tstamp->overflow_work, 0);
+ queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
else
mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
@@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
/* Initialize 1PPS data structures */
-#define MAX_PIN_NUM 8
- tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
- if (tstamp->pps_pin_caps) {
- if (MLX5_CAP_GEN(priv->mdev, pps))
- mlx5e_get_pps_caps(priv, tstamp);
- if (tstamp->ptp_info.n_pins)
- mlx5e_init_pin_config(tstamp);
- } else {
- mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
- }
+ if (MLX5_PPS_CAP(priv->mdev))
+ mlx5e_get_pps_caps(priv, tstamp);
+ if (tstamp->ptp_info.n_pins)
+ mlx5e_init_pin_config(tstamp);
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
@@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
priv->tstamp.ptp = NULL;
}
- kfree(tstamp->pps_pin_caps);
- kfree(tstamp->ptp_info.pin_config);
-
+ cancel_work_sync(&tstamp->pps_info.out_work);
cancel_delayed_work_sync(&tstamp->overflow_work);
+ kfree(tstamp->ptp_info.pin_config);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index bdd82c9b3992..eafc59280ada 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
static bool outer_header_zero(u32 *match_criteria)
{
- int size = MLX5_ST_SZ_BYTES(fte_match_param);
+ int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1eac5003084f..57f31fa478ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
break;
case MLX5_DEV_EVENT_PPS:
eqe = (struct mlx5_eqe *)param;
- ptp_event.type = PTP_CLOCK_EXTTS;
ptp_event.index = eqe->data.pps.pin;
ptp_event.timestamp =
timecounter_cyc2time(&priv->tstamp.clock,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index af51a5d2b912..52b9a64cd3a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
else
mlx5_core_dbg(dev, "port_module_event is not set\n");
- if (MLX5_CAP_GEN(dev, pps))
+ if (MLX5_PPS_CAP(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
if (MLX5_CAP_GEN(dev, fpga))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 89bfda419efe..8b18cc9ec026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
- MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH ||
+ esw->mode == SRIOV_NONE)
return;
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1ee5bce85901..85298051a3e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -178,8 +178,6 @@ out:
static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
- mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
-
mlx5_core_destroy_qp(mdev, qp);
}
@@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
-
err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -253,6 +249,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
+ struct mlx5i_priv *ipriv = priv->ppriv;
int err;
err = mlx5e_create_indirect_rqt(priv);
@@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5i_create_flow_steering(priv);
+ err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
if (err)
goto err_destroy_direct_tirs;
+ err = mlx5i_create_flow_steering(priv);
+ if (err)
+ goto err_remove_rx_underlay_qpn;
+
return 0;
+err_remove_rx_underlay_qpn:
+ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
@@ -290,6 +293,9 @@ err_destroy_indirect_rqts:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index a3a836bdcfd2..f26f97fe4666 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2)
{
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- if (tracker->netdev_state[0].tx_enabled) {
- *port1 = 1;
- *port2 = 1;
- } else {
- *port1 = 2;
- *port2 = 2;
- }
- } else {
- *port1 = 1;
- *port2 = 2;
- if (!tracker->netdev_state[0].link_up)
- *port1 = 2;
- else if (!tracker->netdev_state[1].link_up)
- *port2 = 1;
+ *port1 = 1;
+ *port2 = 2;
+ if (!tracker->netdev_state[0].tx_enabled ||
+ !tracker->netdev_state[0].link_up) {
+ *port1 = 2;
+ return;
}
+
+ if (!tracker->netdev_state[1].tx_enabled ||
+ !tracker->netdev_state[1].link_up)
+ *port2 = 1;
}
static void mlx5_activate_lag(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6a3d6bef7dd4..6a263e8d883a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
+ MLX5_CAP_GEN((mdev), pps_modify) && \
+ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
+ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
+
int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
void mlx5e_init(void);
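The new MLX5_PPS_CAP() macro folds four firmware capability bits into a single predicate, so every PPS (pulse-per-second) code path can be gated with one check. A hedged sketch of a call site follows; the function name is illustrative, not part of this patch:

    /* Illustrative caller: skip all PPS setup on devices that lack any
     * of the four required capabilities. */
    static void mlx5_pps_init(struct mlx5_core_dev *mdev)
    {
            if (!MLX5_PPS_CAP(mdev))
                    return;
            /* ... register PPS pins, program MTPPS/MTPPSE ... */
    }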
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index bcdf7779c48d..bf99d40e30b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
int vf;
if (!sriov->enabled_vfs)
+#ifdef CONFIG_MLX5_CORE_EN
+ goto disable_sriov_resources;
+#else
return;
+#endif
for (vf = 0; vf < sriov->num_vfs; vf++) {
if (!sriov->vfs_ctx[vf].enabled)
@@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
}
#ifdef CONFIG_MLX5_CORE_EN
+disable_sriov_resources:
mlx5_eswitch_disable_sriov(dev->priv.eswitch);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 802f5b8d8761..add03fa34a2d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1631,6 +1631,10 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry);
+static bool
+mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
+ const struct mlxsw_sp_fib_entry *fib_entry);
+
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@@ -1639,6 +1643,9 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
+ fib_entry))
+ continue;
err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
return err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 22cf6353ba04..7ecf549c7f1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -205,7 +205,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
int i;
- for (i = 0; i < 23; i++)
+ for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
if ((i < 12) || (i > 17))
reg_space[DMA_BUS_MODE / 4 + i] =
readl(ioaddr + DMA_BUS_MODE + i * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index eef2f222ce9a..6502b9aa3bf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
int i;
- for (i = 0; i < 9; i++)
+ for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
reg_space[DMA_BUS_MODE / 4 + i] =
readl(ioaddr + DMA_BUS_MODE + i * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 9091df86723a..adc54006f884 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -136,6 +136,9 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+#define NUM_DWMAC100_DMA_REGS 9
+#define NUM_DWMAC1000_DMA_REGS 23
+
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index babb39c646ff..af30b4857c3b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -33,6 +33,8 @@
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
+#define ETHTOOL_DMA_OFFSET 55
+
struct stmmac_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -442,6 +444,9 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
priv->hw->mac->dump_regs(priv->hw, reg_space);
priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
+ /* Copy DMA registers to where ethtool expects them */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
+ NUM_DWMAC1000_DMA_REGS * 4);
}
static void
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 3af540adb3c5..fca1bca7f69d 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -13,9 +13,9 @@
/* Happy Meal global registers. */
#define GREG_SWRESET 0x000UL /* Software Reset */
#define GREG_CFG 0x004UL /* Config Register */
-#define GREG_STAT 0x108UL /* Status */
-#define GREG_IMASK 0x10cUL /* Interrupt Mask */
-#define GREG_REG_SIZE 0x110UL
+#define GREG_STAT 0x100UL /* Status */
+#define GREG_IMASK 0x104UL /* Interrupt Mask */
+#define GREG_REG_SIZE 0x108UL
/* Global reset register. */
#define GREG_RESET_ETX 0x01
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index d9db8a06afd2..cce9c9ed46aa 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1338,7 +1338,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
{
static int count;
- printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):",
+ printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
dev->name, status);
if (status & Int_IntPCI)
printk(" IntPCI");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8ff4cbf582cc..9453eef6d09f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -316,14 +316,34 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
return slots_used;
}
-/* Estimate number of page buffers neede to transmit
- * Need at most 2 for RNDIS header plus skb body and fragments.
- */
-static unsigned int netvsc_get_slots(const struct sk_buff *skb)
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+ int i, frags = skb_shinfo(skb)->nr_frags;
+ int pages = 0;
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ unsigned long size = skb_frag_size(frag);
+ unsigned long offset = frag->page_offset;
+
+ /* Skip unused frames from start of page */
+ offset &= ~PAGE_MASK;
+ pages += PFN_UP(offset + size);
+ }
+ return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
{
- return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
- + skb_shinfo(skb)->nr_frags
- + 2;
+ char *data = skb->data;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+ int slots;
+ int frag_slots;
+
+ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ frag_slots = count_skb_frag_slots(skb);
+ return slots + frag_slots;
}
static u32 net_checksum_info(struct sk_buff *skb)
@@ -360,18 +380,21 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
- /* We can only transmit MAX_PAGE_BUFFER_COUNT number
+	 /* We will need at most two pages to describe the RNDIS
+ * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
* of pages in a single packet. If skb is scattered around
* more pages we try linearizing it.
*/
- num_data_pgs = netvsc_get_slots(skb);
+
+ num_data_pgs = netvsc_get_slots(skb) + 2;
+
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
++net_device_ctx->eth_stats.tx_scattered;
if (skb_linearize(skb))
goto no_memory;
- num_data_pgs = netvsc_get_slots(skb);
+ num_data_pgs = netvsc_get_slots(skb) + 2;
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
++net_device_ctx->eth_stats.tx_too_big;
goto drop;
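netvsc now counts page-buffer slots by rounding offset-within-page plus length up to whole pages, per fragment, with the callers adding 2 slots for the RNDIS header. A self-contained userspace sketch of that rounding, with PAGE_SIZE fixed at 4096 purely for illustration:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Pages spanned by a buffer that starts `offset` bytes into its first
     * page -- the same rounding DIV_ROUND_UP()/PFN_UP() perform. */
    static unsigned long slots(unsigned long offset, unsigned long len)
    {
            return (offset % PAGE_SIZE + len + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
            /* 100 bytes starting 4000 bytes into a page straddle 2 pages */
            printf("%lu\n", slots(4000, 100));      /* prints 2 */
            return 0;
    }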
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 6f6ed75b63c9..765de3bedb88 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
{
struct usb_device *dev = mcs->usbdev;
- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, 2,
- msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+ void *dmabuf;
+ int ret;
+
+ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+ memcpy(val, dmabuf, sizeof(__u16));
+ kfree(dmabuf);
return ret;
}
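The mcs7780 fix follows the general USB rule that buffers handed to usb_control_msg() must be heap allocations (DMA-able), never stack or embedded storage. A minimal sketch of the pattern; REQ, RTYPE and TIMEOUT_MS are placeholders, not values from this driver:

    u8 *dmabuf = kmalloc(2, GFP_KERNEL);    /* heap memory is DMA-able */
    if (!dmabuf)
            return -ENOMEM;
    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQ, RTYPE,
                          0, reg, dmabuf, 2, msecs_to_jiffies(TIMEOUT_MS));
    memcpy(val, dmabuf, 2);                 /* copy out before freeing */
    kfree(dmabuf);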
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2dda72004a7d..928fd892f167 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,7 +7,16 @@ menuconfig MDIO_DEVICE
help
MDIO devices and driver infrastructure code.
-if MDIO_DEVICE
+config MDIO_BUS
+ tristate
+ default m if PHYLIB=m
+ default MDIO_DEVICE
+ help
+ This internal symbol is used for link time dependencies and it
+ reflects whether the mdio_bus/mdio_device code is built as a
+ loadable module or built-in.
+
+if MDIO_BUS
config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
@@ -28,7 +37,6 @@ config MDIO_BCM_UNIMAC
config MDIO_BITBANG
tristate "Bitbanged MDIO buses"
- depends on !(MDIO_DEVICE=y && PHYLIB=m)
help
This module implements the MDIO bus protocol in software,
for use by low level drivers that export the ability to
@@ -127,7 +135,6 @@ config MDIO_THUNDER
tristate "ThunderX SOCs MDIO buses"
depends on 64BIT
depends on PCI
- depends on !(MDIO_DEVICE=y && PHYLIB=m)
select MDIO_CAVIUM
help
This driver supports the MDIO interfaces found on Cavium
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3aedf415908b..b9d4922581de 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -748,6 +748,9 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
+
+ /* Now we can run the state machine synchronously */
+ phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index eac499c58aa7..6dde9a0cfe76 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -131,7 +131,6 @@ static void del_chan(struct pppox_sock *sock)
clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
spin_unlock(&chan_lock);
- synchronize_rcu();
}
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
@@ -520,6 +519,7 @@ static int pptp_release(struct socket *sock)
po = pppox_sk(sk);
del_chan(po);
+ synchronize_rcu();
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_DEAD;
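Moving synchronize_rcu() out of del_chan() into pptp_release() keeps the canonical RCU removal order -- unpublish the pointer, wait out a grace period, only then tear the object down -- while paying for the grace period once, on release. The generic shape, as a sketch with illustrative names:

    spin_lock(&lock);
    rcu_assign_pointer(table[id], NULL);    /* unpublish the object */
    spin_unlock(&lock);
    synchronize_rcu();                      /* wait out in-flight readers */
    kfree(obj);                             /* now safe to free or reuse */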
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 464570409796..ae53e899259f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -60,11 +60,11 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
static int __set_port_dev_addr(struct net_device *port_dev,
const unsigned char *dev_addr)
{
- struct sockaddr addr;
+ struct sockaddr_storage addr;
- memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
- addr.sa_family = port_dev->type;
- return dev_set_mac_address(port_dev, &addr);
+ memcpy(addr.__data, dev_addr, port_dev->addr_len);
+ addr.ss_family = port_dev->type;
+ return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
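struct sockaddr guarantees only 14 bytes of sa_data, so copying port_dev->addr_len bytes into it can overflow for link layers with long hardware addresses (InfiniBand uses 20); sockaddr_storage is sized for any address family. A runnable userspace illustration of the size difference:

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            /* sa_data holds 14 bytes; sockaddr_storage is 128 bytes on Linux */
            printf("sockaddr:         %zu\n", sizeof(struct sockaddr));
            printf("sockaddr_storage: %zu\n", sizeof(struct sockaddr_storage));
            return 0;
    }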
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a93392d7a340..68add55f8460 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2593,8 +2593,16 @@ static int __init tun_init(void)
goto err_misc;
}
- register_netdevice_notifier(&tun_notifier_block);
+ ret = register_netdevice_notifier(&tun_notifier_block);
+ if (ret) {
+ pr_err("Can't register netdevice notifier\n");
+ goto err_notifier;
+ }
+
return 0;
+
+err_notifier:
+ misc_deregister(&tun_miscdev);
err_misc:
rtnl_link_unregister(&tun_link_ops);
err_linkops:
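tun_init() now checks register_netdevice_notifier() and, like the existing labels, unwinds every step already completed in reverse order. The goto-unwind idiom in isolation, with stub steps standing in for the real registrations:

    #include <stdio.h>

    static int  step_a(void)      { return 0; }
    static int  step_b(void)      { return -1; }    /* pretend this step fails */
    static void undo_step_a(void) { puts("undo a"); }

    static int init(void)
    {
            int err;

            err = step_a();
            if (err)
                    return err;

            err = step_b();
            if (err)
                    goto undo_a;    /* unwind completed steps, newest first */

            return 0;

    undo_a:
            undo_step_a();
            return err;
    }

    int main(void) { return init() ? 1 : 0; }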
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1902701e15a9..e90de2186ffc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -952,21 +952,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
- ctx = mergeable_len_to_ctx(len, headroom);
get_page(alloc_frag->page);
alloc_frag->offset += len + headroom;
hole = alloc_frag->size - alloc_frag->offset;
if (hole < len + headroom) {
/* To avoid internal fragmentation, if there is very likely not
* enough space for another buffer, add the remaining space to
- * the current buffer. This extra space is not included in
- * the truesize stored in ctx.
+ * the current buffer.
*/
len += hole;
alloc_frag->offset += hole;
}
sg_init_one(rq->sg, buf, len);
+ ctx = mergeable_len_to_ctx(len, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -2805,9 +2804,9 @@ module_init(virtio_net_driver_init);
static __exit void virtio_net_driver_exit(void)
{
+ unregister_virtio_driver(&virtio_net_driver);
cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
cpuhp_remove_multi_state(virtionet_online);
- unregister_virtio_driver(&virtio_net_driver);
}
module_exit(virtio_net_driver_exit);
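The reordered exit path mirrors init exactly in reverse: the virtio driver is unregistered first, so no new device can arrive while the CPU-hotplug states it depends on are being removed. A sketch of the shape; the function names are illustrative:

    static int __init mod_init(void)
    {
            setup_cpuhp_states();           /* A */
            return register_the_driver();   /* B */
    }

    static void __exit mod_exit(void)
    {
            unregister_the_driver();        /* undo B first ... */
            remove_cpuhp_states();          /* ... then undo A (LIFO) */
    }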
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 2153e8062b4c..5cc3a07dda9e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -214,7 +214,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
/* Make sure there's enough writeable headroom */
if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
- head_delta = drvr->hdrlen - skb_headroom(skb);
+ head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
brcmf_ifname(ifp), head_delta);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fbcbb4325936..f3556122c6ac 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2053,12 +2053,13 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
atomic_inc(&stats->pktcow_failed);
return -ENOMEM;
}
+ head_pad = 0;
}
skb_push(pkt, head_pad);
dat_buf = (u8 *)(pkt->data);
}
memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
- return 0;
+ return head_pad;
}
/**
@@ -4174,11 +4175,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
goto fail;
}
- /* allocate scatter-gather table. sg support
- * will be disabled upon allocation failure.
- */
- brcmf_sdiod_sgtable_alloc(bus->sdiodev);
-
/* Query the F2 block size, set roundup accordingly */
bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index adaa2f0097cc..fb40ddfced99 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -1189,11 +1189,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
next_reclaimed;
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
next_reclaimed);
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
}
iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
- iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
/* process frames */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index 545d14b0bc92..f5c1127253cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -55,8 +55,8 @@ static inline bool iwl_trace_data(struct sk_buff *skb)
/* also account for the RFC 1042 header, of course */
offs += 6;
- return skb->len > offs + 2 &&
- *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);
+ return skb->len <= offs + 2 ||
+ *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
}
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index bcde1ba0f1c8..c7b1e58e3384 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1084,7 +1084,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
+ /*
+ * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
+ * so later code will - from now on - see that we're doing it.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
/* Clean up some internal and mac80211 state on restart */
iwl_mvm_restart_cleanup(mvm);
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index eaacfaf37206..ddd8719f27b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1090,6 +1090,7 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
* @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
* @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@@ -1101,6 +1102,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
IWL_MVM_STATUS_HW_CTKILL,
IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_HW_RESTART_REQUESTED,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4d1188b8736a..9c175d5e9d67 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1235,9 +1235,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
- NULL);
- } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status)) {
+ NULL);
+ } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
IWL_ERR(mvm,
@@ -1268,6 +1267,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
if (fw_error && mvm->fw_restart > 0)
mvm->fw_restart--;
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
ieee80211_restart_hw(mvm->hw);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 4df5f13fcdae..ab66b4394dfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -277,6 +277,18 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
/* Timer expired */
sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+
+ /*
+ * sta should be valid unless the following happens:
+ * The firmware asserts which triggers a reconfig flow, but
+	 * The firmware asserts, which triggers a reconfig flow, but
+	 * the reconfig fails before we set the pointer to sta into
+	 * the fw_id_to_mac_id pointer table. mac80211 can't stop the
+	 * A-MPDU session and hence the timer continues to run. Then, the
+ */
+ if (!sta)
+ goto unlock;
+
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
sta->addr, ba_data->tid);
@@ -2015,7 +2027,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_MAX_TID_COUNT,
wdg_timeout);
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
mvm->probe_queue = queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_dev_queue = queue;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 92b3a55d0fbc..f95eec52508e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3150,7 +3150,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->d0i3_waitq);
if (trans_pcie->msix_enabled) {
- if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
+ ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+ if (ret)
goto out_no_pci;
} else {
ret = iwl_pcie_alloc_ict(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index de50418adae5..034bdb4a0b06 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -298,6 +298,9 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i];
+ if (!test_bit(i, trans_pcie->queue_used))
+ continue;
+
spin_lock_bh(&txq->lock);
if (txq->need_update) {
iwl_pcie_txq_inc_wr_ptr(trans, txq);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 2a7ad5ffe997..cd5dc6dcb19f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -846,9 +846,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
return false;
}
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
-
bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index fb1ebb01133f..70723e67b7d7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2547,7 +2547,6 @@ struct bt_coexist_info {
struct rtl_btc_ops {
void (*btc_init_variables) (struct rtl_priv *rtlpriv);
void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
- void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cb96f4a7ae3a..c49f1f8b2e57 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = sizeof(*s);
+ c.directive.numd = cpu_to_le32(sizeof(*s));
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS;
@@ -1995,6 +1995,9 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
int serial_len = sizeof(ctrl->serial);
int model_len = sizeof(ctrl->model);
+ if (!uuid_is_null(&ns->uuid))
+ return sprintf(buf, "uuid.%pU\n", &ns->uuid);
+
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return sprintf(buf, "eui.%16phN\n", ns->nguid);
@@ -2709,7 +2712,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
mutex_lock(&ctrl->namespaces_mutex);
/* Forcibly unquiesce queues to avoid blocking dispatch */
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ if (ctrl->admin_q)
+ blk_mq_unquiesce_queue(ctrl->admin_q);
list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d666ada39a9b..5c2a08ef08ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1888,7 +1888,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
* the target device is present
*/
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
- return BLK_STS_IOERR;
+ goto busy;
if (!nvme_fc_ctrl_get(ctrl))
return BLK_STS_IOERR;
@@ -1958,22 +1958,25 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
queue->lldd_handle, &op->fcp_req);
if (ret) {
- if (op->rq) /* normal request */
+ if (!(op->flags & FCOP_FLAGS_AEN))
nvme_fc_unmap_data(ctrl, op->rq, op);
- /* else - aen. no cleanup needed */
nvme_fc_ctrl_put(ctrl);
- if (ret != -EBUSY)
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
+ ret != -EBUSY)
return BLK_STS_IOERR;
- if (op->rq)
- blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
-
- return BLK_STS_RESOURCE;
+ goto busy;
}
return BLK_STS_OK;
+
+busy:
+ if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
+ blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+
+ return BLK_STS_RESOURCE;
}
static blk_status_t
@@ -2802,66 +2805,70 @@ out_fail:
return ERR_PTR(ret);
}
-enum {
- FCT_TRADDR_ERR = 0,
- FCT_TRADDR_WWNN = 1 << 0,
- FCT_TRADDR_WWPN = 1 << 1,
-};
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
};
-static const match_table_t traddr_opt_tokens = {
- { FCT_TRADDR_WWNN, "nn-%s" },
- { FCT_TRADDR_WWPN, "pn-%s" },
- { FCT_TRADDR_ERR, NULL }
-};
-
static int
-nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
- substring_t args[MAX_OPT_ARGS];
- char *options, *o, *p;
- int token, ret = 0;
u64 token64;
- options = o = kstrdup(buf, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
- while ((p = strsep(&o, ":\n")) != NULL) {
- if (!*p)
- continue;
+ return 0;
+}
- token = match_token(p, traddr_opt_tokens, args);
- switch (token) {
- case FCT_TRADDR_WWNN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->nn = token64;
- break;
- case FCT_TRADDR_WWPN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->pn = token64;
- break;
- default:
- pr_warn("unknown traddr token or missing value '%s'\n",
- p);
- ret = -EINVAL;
- goto out;
- }
- }
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * Because the kernel parsers need a 0x prefix to determine the number
+ * base, the string to parse is always built with a 0x prefix before the
+ * name strings are parsed.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+	/* validate that the string is in one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
-out:
- kfree(options);
- return ret;
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
+
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
}
static struct nvme_ctrl *
@@ -2875,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
unsigned long flags;
int ret;
- ret = nvme_fc_parse_address(&raddr, opts->traddr);
+ ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
if (ret || !raddr.nn || !raddr.pn)
return ERR_PTR(-EINVAL);
- ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+ ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
if (ret || !laddr.nn || !laddr.pn)
return ERR_PTR(-EINVAL);
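The new nvme_fc_parse_traddr() accepts exactly two fixed-width formats, "nn-0x<16 hex>:pn-0x<16 hex>" and "nn-<16 hex>:pn-<16 hex>", and builds a "0x"-prefixed copy of each name before converting it, since the number parser needs the prefix to pick base 16. A self-contained userspace sketch of the long form only, with the NVME_FC_TRADDR_* lengths hard-coded for brevity:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_traddr(const char *buf, unsigned long long *nn,
                            unsigned long long *pn)
    {
            char tmp[19];                        /* "0x" + 16 hex digits + NUL */

            if (strlen(buf) != 43 ||             /* 5 + 16 + ':' + 5 + 16 */
                strncmp(buf, "nn-0x", 5) || strncmp(buf + 22, "pn-0x", 5))
                    return -1;

            snprintf(tmp, sizeof(tmp), "0x%.16s", buf + 5);
            *nn = strtoull(tmp, NULL, 0);        /* "0x" selects base 16 */
            snprintf(tmp, sizeof(tmp), "0x%.16s", buf + 27);
            *pn = strtoull(tmp, NULL, 0);
            return 0;
    }

    int main(void)
    {
            unsigned long long nn, pn;

            if (!parse_traddr("nn-0x20000090fa942779:pn-0x10000090fa942779",
                              &nn, &pn))
                    printf("nn=%llx pn=%llx\n", nn, pn);
            return 0;
    }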
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d10d2f279d19..cd888a47d0fc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -539,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
}
#endif
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
@@ -556,7 +556,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
length -= (page_size - offset);
if (length <= 0)
- return true;
+ return BLK_STS_OK;
dma_len -= (page_size - offset);
if (dma_len) {
@@ -569,7 +569,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
if (length <= page_size) {
iod->first_dma = dma_addr;
- return true;
+ return BLK_STS_OK;
}
nprps = DIV_ROUND_UP(length, page_size);
@@ -585,7 +585,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
if (!prp_list) {
iod->first_dma = dma_addr;
iod->npages = -1;
- return false;
+ return BLK_STS_RESOURCE;
}
list[0] = prp_list;
iod->first_dma = prp_dma;
@@ -595,7 +595,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return false;
+ return BLK_STS_RESOURCE;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -609,13 +609,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
break;
if (dma_len > 0)
continue;
- BUG_ON(dma_len < 0);
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
sg = sg_next(sg);
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
- return true;
+ return BLK_STS_OK;
+
+ bad_sgl:
+ if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
+ blk_rq_payload_bytes(req), iod->nents)) {
+ for_each_sg(iod->sg, sg, iod->nents, i) {
+ dma_addr_t phys = sg_phys(sg);
+ pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+ "dma_address:%pad dma_length:%d\n", i, &phys,
+ sg->offset, sg->length,
+ &sg_dma_address(sg),
+ sg_dma_len(sg));
+ }
+ }
+ return BLK_STS_IOERR;
+
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -637,7 +653,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- if (!nvme_setup_prps(dev, req))
+ ret = nvme_setup_prps(dev, req);
+ if (ret != BLK_STS_OK)
goto out_unmap;
ret = BLK_STS_IOERR;
@@ -1602,7 +1619,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
struct nvme_host_mem_buf_desc *descs;
- u32 chunk_size, max_entries;
+ u32 chunk_size, max_entries, len;
int i = 0;
void **bufs;
u64 size = 0, tmp;
@@ -1621,10 +1638,10 @@ retry:
if (!bufs)
goto out_free_descs;
- for (size = 0; size < preferred; size += chunk_size) {
- u32 len = min_t(u64, chunk_size, preferred - size);
+ for (size = 0; size < preferred; size += len) {
dma_addr_t dma_addr;
+ len = min_t(u64, chunk_size, preferred - size);
bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
if (!bufs[i])
@@ -2282,7 +2299,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
result = nvme_dev_map(dev);
if (result)
- goto free;
+ goto put_pci;
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
@@ -2291,7 +2308,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
result = nvme_setup_prp_pools(dev);
if (result)
- goto put_pci;
+ goto unmap;
quirks |= check_dell_samsung_bug(pdev);
@@ -2308,9 +2325,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release_pools:
nvme_release_prp_pools(dev);
+ unmap:
+ nvme_dev_unmap(dev);
put_pci:
put_device(dev->dev);
- nvme_dev_unmap(dev);
free:
kfree(dev->queues);
kfree(dev);
@@ -2466,6 +2484,9 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54),
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a55),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 35f930db3c02..2d7a98ab53fb 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -168,11 +168,21 @@ out:
nvmet_req_complete(req, status);
}
+static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
+{
+ int len = min(src_len, dst_len);
+
+ memcpy(dst, src, len);
+ if (dst_len > len)
+ memset(dst + len, ' ', dst_len - len);
+}
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
u16 status = 0;
+ const char model[] = "Linux";
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
@@ -184,8 +194,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->vid = 0;
id->ssvid = 0;
- memset(id->sn, ' ', sizeof(id->sn));
- snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
+ bin2hex(id->sn, &ctrl->subsys->serial,
+ min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+ copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
+ copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
-	memset(id->mn, ' ', sizeof(id->mn));
-	strncpy((char *)id->mn, "Linux", sizeof(id->mn));
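NVMe identify strings (SN/MN/FR) are fixed-width, space-padded ASCII fields with no NUL terminator, which is exactly what the new copy_and_pad() helper produces. A runnable userspace equivalent and demo:

    #include <stdio.h>
    #include <string.h>

    /* Copy src into a fixed-width field, padding the tail with spaces
     * (NVMe identify strings are not NUL-terminated). */
    static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
    {
            int len = src_len < dst_len ? src_len : dst_len;

            memcpy(dst, src, len);
            if (dst_len > len)
                    memset(dst + len, ' ', dst_len - len);
    }

    int main(void)
    {
            char mn[40];

            copy_and_pad(mn, sizeof(mn), "Linux", 5);
            printf("[%.*s]\n", (int)sizeof(mn), mn); /* "Linux" + 35 spaces */
            return 0;
    }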
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index a358ecd93e11..0a0067e771f5 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -650,7 +650,7 @@ out_unlock:
CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
-static ssize_t nvmet_subsys_version_show(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
@@ -666,7 +666,7 @@ static ssize_t nvmet_subsys_version_show(struct config_item *item,
(int)NVME_MINOR(subsys->ver));
}
-static ssize_t nvmet_subsys_version_store(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
@@ -684,11 +684,33 @@ static ssize_t nvmet_subsys_version_store(struct config_item *item,
return count;
}
-CONFIGFS_ATTR(nvmet_subsys_, version);
+CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+
+static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ down_write(&nvmet_config_sem);
+ sscanf(page, "%llx\n", &subsys->serial);
+ up_write(&nvmet_config_sem);
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
- &nvmet_subsys_attr_version,
+ &nvmet_subsys_attr_attr_version,
+ &nvmet_subsys_attr_attr_serial,
NULL,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5b4ac103748..f4b02bb4a1a8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -767,9 +767,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
- /* generate a random serial number as our controllers are ephemeral: */
- get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
-
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
@@ -928,6 +925,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
return NULL;
subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+ /* generate a random serial number as our controllers are ephemeral: */
+ get_random_bytes(&subsys->serial, sizeof(subsys->serial));
switch (type) {
case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1e6dcc241b3c..31ca55dfcb1d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1174,14 +1174,14 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
*/
if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
ret = VERR_CR_ASSOC_LEN;
- else if (rqst->desc_list_len <
- cpu_to_be32(FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN))
+ else if (be32_to_cpu(rqst->desc_list_len) <
+ FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
ret = VERR_CR_ASSOC_RQST_LEN;
else if (rqst->assoc_cmd.desc_tag !=
cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
ret = VERR_CR_ASSOC_CMD;
- else if (rqst->assoc_cmd.desc_len <
- cpu_to_be32(FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN))
+ else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
+ FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
ret = VERR_CR_ASSOC_CMD_LEN;
else if (!rqst->assoc_cmd.ersp_ratio ||
(be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
@@ -2293,66 +2293,70 @@ nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
-enum {
- FCT_TRADDR_ERR = 0,
- FCT_TRADDR_WWNN = 1 << 0,
- FCT_TRADDR_WWPN = 1 << 1,
-};
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
};
-static const match_table_t traddr_opt_tokens = {
- { FCT_TRADDR_WWNN, "nn-%s" },
- { FCT_TRADDR_WWPN, "pn-%s" },
- { FCT_TRADDR_ERR, NULL }
-};
-
static int
-nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
- substring_t args[MAX_OPT_ARGS];
- char *options, *o, *p;
- int token, ret = 0;
u64 token64;
- options = o = kstrdup(buf, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
- while ((p = strsep(&o, ":\n")) != NULL) {
- if (!*p)
- continue;
+ return 0;
+}
- token = match_token(p, traddr_opt_tokens, args);
- switch (token) {
- case FCT_TRADDR_WWNN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->nn = token64;
- break;
- case FCT_TRADDR_WWPN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->pn = token64;
- break;
- default:
- pr_warn("unknown traddr token or missing value '%s'\n",
- p);
- ret = -EINVAL;
- goto out;
- }
- }
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * Because the kernel parsers need a 0x prefix to determine the number
+ * base, the string to parse is always built with a 0x prefix before the
+ * name strings are parsed.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+	/* validate that the string is in one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
+
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
-out:
- kfree(options);
- return ret;
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
}
static int
@@ -2370,7 +2374,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
/* map the traddr address info to a target port */
- ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
+ ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
+ sizeof(port->disc_addr.traddr));
if (ret)
return ret;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 747bbdb4f9c6..e3b244c7e443 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -112,7 +112,6 @@ struct nvmet_ctrl {
struct mutex lock;
u64 cap;
- u64 serial;
u32 cc;
u32 csts;
@@ -152,6 +151,7 @@ struct nvmet_subsys {
u16 max_qid;
u64 ver;
+ u64 serial;
char *subsysnqn;
struct config_group group;
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index a0d4ede9b8fc..63e3eb55f3ac 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -170,7 +170,7 @@ static const struct of_device_id rockchip_efuse_match[] = {
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
- .compatible = "rockchip,rk322x-efuse",
+ .compatible = "rockchip,rk3228-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6ce72aa65425..ab21c846eb27 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -476,7 +476,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (!of_irq_to_resource(dev, i, res))
+ if (of_irq_to_resource(dev, i, res) <= 0)
break;
return i;
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 055f83fddc18..7147aa53e9a2 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -954,7 +954,7 @@ static struct attribute *pdcs_subsys_attrs[] = {
NULL,
};
-static struct attribute_group pdcs_attr_group = {
+static const struct attribute_group pdcs_attr_group = {
.attrs = pdcs_subsys_attrs,
};
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index dc459eb1246b..1c5e0f333779 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
if (irq != other_irq) {
pr_warn("mismatched PPIs detected.\n");
err = -EINVAL;
+ goto err_out;
}
} else {
- err = request_irq(irq, handler,
- IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+ unsigned long irq_flags;
+
+ err = irq_force_affinity(irq, cpumask_of(cpu));
+
+ if (err && num_possible_cpus() > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ goto err_out;
+ }
+
+ if (platdata && platdata->irq_flags) {
+ irq_flags = platdata->irq_flags;
+ } else {
+ irq_flags = IRQF_PERCPU |
+ IRQF_NOBALANCING |
+ IRQF_NO_THREAD;
+ }
+
+ err = request_irq(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
+ if (err)
+ goto err_out;
cpumask_set_cpu(cpu, &armpmu->active_irqs);
-
return 0;
+
+err_out:
+ pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
+ return err;
}
int armpmu_request_irqs(struct arm_pmu *armpmu)
@@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
enable_percpu_irq(irq, IRQ_TYPE_NONE);
return 0;
}
-
- if (irq_force_affinity(irq, cpumask_of(cpu)) &&
- num_possible_cpus() > 1) {
- pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, cpu);
- }
}
return 0;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 69255f53057a..4eafa7a42e52 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
}
if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
- pr_warn("no interrupt-affinity property for %s, guessing.\n",
- of_node_full_name(pdev->dev.of_node));
+ pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
+ pdev->dev.of_node);
}
/*
@@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
}
if (ret) {
- pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+ pr_info("%pOF: failed to probe PMU!\n", node);
goto out_free;
}
@@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
out_free_irqs:
armpmu_free_irqs(pmu);
out_free:
- pr_info("%s: failed to register PMU devices!\n",
- of_node_full_name(node));
+ pr_info("%pOF: failed to register PMU devices!\n", node);
armpmu_free(pmu);
return ret;
}
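%pOF is the vsprintf extension for printing a struct device_node as its full device-tree path, which lets callers drop the open-coded of_node_full_name() helper:

    /* Before: pr_info("%s: ...", of_node_full_name(node));
     * After:  the node's full path, e.g. "/soc/pmu", printed directly. */
    pr_info("%pOF: failed to probe PMU!\n", node);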
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index c259848228b4..b242cce10468 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event)
}
if ((event != event->group_leader) &&
+ !is_software_event(event->group_leader) &&
(L2_EVT_GROUP(event->group_leader->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event)
list_for_each_entry(sibling, &event->group_leader->sibling_list,
group_entry) {
if ((sibling != event) &&
+ !is_software_event(sibling) &&
(L2_EVT_GROUP(sibling->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 37371b89b14f..64fc59c3ae6d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -30,8 +30,8 @@ config PHY_BCM_NS_USB3
tristate "Broadcom Northstar USB 3.0 PHY Driver"
depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_IOMEM && OF
+ depends on MDIO_BUS
select GENERIC_PHY
- select MDIO_DEVICE
help
Enable this to support Broadcom USB 3.0 PHY connected to the USB
controller on Northstar family.
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 7e0d4f724dda..432fc40990bd 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
chpid.id = crw0->rsid;
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
+ case CRW_ERC_INIT:
if (!chp_is_registered(chpid))
chp_new(chpid);
chsc_chp_online(chpid);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8914eab84337..4f7cdb28bd38 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = {
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
- .max_sectors = 8192,
+ .max_sectors = 1024,
.no_write_same = 1,
};
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 1e82d4128a84..4fe606b000b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -759,8 +759,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
return false;
return true;
case SG_DXFER_FROM_DEV:
- if (hp->dxfer_len < 0)
- return false;
+ /*
+	 * for SG_DXFER_FROM_DEV, dxfer_len is always set to a value > 0, and
+	 * dxferp may legitimately be either NULL or non-NULL, so there is
+	 * nothing left to check here; just return true.
+ */
return true;
case SG_DXFER_TO_DEV:
case SG_DXFER_TO_FROM_DEV:
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07ec8a8877de..e164ffade38a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat {
#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
-#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
#define RAID_MAP_MAX_ENTRIES 1024
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 2afe3597982e..f4b7a98a7913 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -134,7 +134,6 @@ struct apid_data {
* @spmic: SPMI controller object
* @ver_ops: version dependent operations.
* @ppid_to_apid in-memory copy of PPID -> channel (APID) mapping table.
- * v2 only.
*/
struct spmi_pmic_arb {
void __iomem *rd_base;
@@ -1016,6 +1015,13 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
+ pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
+ sizeof(*pa->ppid_to_apid), GFP_KERNEL);
+ if (!pa->ppid_to_apid) {
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
@@ -1048,15 +1054,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
err = PTR_ERR(pa->wr_base);
goto err_put_ctrl;
}
-
- pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
- PMIC_ARB_MAX_PPID,
- sizeof(*pa->ppid_to_apid),
- GFP_KERNEL);
- if (!pa->ppid_to_apid) {
- err = -ENOMEM;
- goto err_put_ctrl;
- }
}
dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 2b9b0941d9eb..6d23226e5f69 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -365,11 +365,23 @@ static int spmi_drv_remove(struct device *dev)
return 0;
}
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
static struct bus_type spmi_bus_type = {
.name = "spmi",
.match = spmi_device_match,
.probe = spmi_drv_probe,
.remove = spmi_drv_remove,
+ .uevent = spmi_drv_uevent,
};
/**
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 268d4e6ef48a..ef28a1cb64ae 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig"
source "drivers/staging/typec/Kconfig"
+source "drivers/staging/vboxvideo/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b93e6f5f0f6e..2918580bdb9e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index b2e382888981..2f7bfc1c59e5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
/* following line: 2-1 per STC */
ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
- /* following line: N-1 per STC */
- ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+ ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
} else { /* TRIG_EXT */
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 85b242ec5f9b..8fc191d99927 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
ibmsg = tx->tx_msg;
ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
- copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE,
- &from);
+ rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
+ &from);
+ if (rc != payload_nob) {
+ kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+ return -EFAULT;
+ }
+
nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
@@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
break;
}
- copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload,
- IBLND_MSG_SIZE, to);
+ rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
+ to);
+ if (rc != rlen) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = 0;
lnet_finalize(ni, lntmsg, 0);
break;
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 002d09159896..a69007ef77bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -132,7 +132,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
kfree(pcmd->parmbuf);
}
- if (!pcmd->rsp) {
+ if (pcmd->rsp) {
if (pcmd->rspsz != 0) {
/* free rsp in cmd_obj */
kfree(pcmd->rsp);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 963235fd7292..d283341cfe43 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 944dd25924be..4754f7a20684 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -40,7 +40,7 @@ static unsigned int get_mxclk_freq(void)
pll_reg = peek32(MXCLK_PLL_CTRL);
M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
- N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT;
+ N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 3aa4128703d5..67207b0554cd 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1053,6 +1053,26 @@ release_fb:
return err;
}
+static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
+ kfree(ap);
+ return 0;
+}
+
static int lynxfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1061,6 +1081,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
int fbidx;
int err;
+ err = lynxfb_kick_out_firmware_fb(pdev);
+ if (err)
+ return err;
+
/* enable device */
err = pcim_enable_device(pdev);
if (err)
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 82e5de248947..67956e24779c 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2314,6 +2314,7 @@ static void __exit speakup_exit(void)
mutex_lock(&spk_mutex);
synth_release();
mutex_unlock(&spk_mutex);
+ spk_ttyio_unregister_ldisc();
speakup_kobj_exit();
@@ -2376,6 +2377,7 @@ static int __init speakup_init(void)
if (err)
goto error_kobjects;
+ spk_ttyio_register_ldisc();
synth_init(synth_name);
speakup_register_devsynth();
/*
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 87b6a0a4c54d..046040ac074c 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -48,6 +48,8 @@ void spk_stop_serial_interrupt(void);
int spk_wait_for_xmitr(struct spk_synth *in_synth);
void spk_serial_release(void);
void spk_ttyio_release(void);
+void spk_ttyio_register_ldisc(void);
+void spk_ttyio_unregister_ldisc(void);
void synth_buffer_skip_nonlatin1(void);
u16 synth_buffer_getc(void);
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index ed8e96b06ead..fe340b07c482 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -154,12 +154,6 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
struct ktermios tmp_termios;
dev_t dev;
- ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops);
- if (ret) {
- pr_err("Error registering line discipline.\n");
- return ret;
- }
-
ret = get_dev_to_use(synth, &dev);
if (ret)
return ret;
@@ -196,10 +190,24 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
tty_unlock(tty);
ret = tty_set_ldisc(tty, N_SPEAKUP);
+ if (ret)
+ pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
return ret;
}
+void spk_ttyio_register_ldisc(void)
+{
+ if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops))
+ pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
+}
+
+void spk_ttyio_unregister_ldisc(void)
+{
+ if (tty_unregister_ldisc(N_SPEAKUP))
+ pr_warn("speakup: Couldn't unregister ldisc\n");
+}
+
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
{
if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
@@ -300,7 +308,7 @@ void spk_ttyio_release(void)
tty_ldisc_flush(speakup_tty);
tty_unlock(speakup_tty);
- tty_ldisc_release(speakup_tty);
+ tty_release_struct(speakup_tty, speakup_tty->index);
}
EXPORT_SYMBOL_GPL(spk_ttyio_release);
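The net effect of this patch is to move line-discipline registration from per-synth initialisation to module load, so a synth re-init no longer re-registers N_SPEAKUP. A condensed lifecycle sketch (not a literal copy of the main.c hunks above):

/* Condensed lifecycle sketch only: */
static int __init example_init(void)
{
	spk_ttyio_register_ldisc();	/* once, at module load */
	/* ... synth_init(), device registration ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* ... synth_release(), device teardown ... */
	spk_ttyio_unregister_ldisc();	/* once, at module unload */
}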
diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/staging/vboxvideo/Kconfig
new file mode 100644
index 000000000000..a52746f9a670
--- /dev/null
+++ b/drivers/staging/vboxvideo/Kconfig
@@ -0,0 +1,12 @@
+config DRM_VBOXVIDEO
+ tristate "Virtual Box Graphics Card"
+ depends on DRM && X86 && PCI
+ select DRM_KMS_HELPER
+ help
+ This is a KMS driver for the virtual Graphics Card used in
+ Virtual Box virtual machines.
+
+	  Although it is possible to build this driver into the kernel, it
+	  is advised to build it as a module, so that it can be updated
+	  independently of the kernel. Select M to build this driver as a
+	  module and add support for these devices via drm/kms interfaces.
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/staging/vboxvideo/Makefile
new file mode 100644
index 000000000000..2d0b3bc7ad73
--- /dev/null
+++ b/drivers/staging/vboxvideo/Makefile
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+
+vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
+ vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_mode.o vbox_prime.o vbox_ttm.o
+
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
new file mode 100644
index 000000000000..ce764309b079
--- /dev/null
+++ b/drivers/staging/vboxvideo/TODO
@@ -0,0 +1,9 @@
+TODO:
+-Move the driver over to the atomic API
+-Stop using old load / unload drm_driver hooks
+-Get a full review of this driver from the drm maintainers on dri-devel
+-Extend this TODO with the results of that review
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+Hans de Goede <hdegoede@redhat.com> and
+Michael Thayer <michael.thayer@oracle.com>.
diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/staging/vboxvideo/hgsmi_base.c
new file mode 100644
index 000000000000..15ff5f42e2cd
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_base.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+#include "hgsmi_ch_setup.h"
+
+/**
+ * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
+ * @param ctx the context of the guest heap to use.
+ * @param location the offset chosen for the flags within guest VRAM.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
+{
+ struct hgsmi_buffer_location *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
+ HGSMI_CC_HOST_FLAGS_LOCATION);
+ if (!p)
+ return -ENOMEM;
+
+ p->buf_location = location;
+ p->buf_len = sizeof(struct hgsmi_host_flags);
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
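Every helper in this file follows the same allocate/fill/submit/free sequence against the guest heap. A minimal sketch of that pattern; struct my_request and MY_CHANNEL_INFO are hypothetical placeholders, not part of this driver:

/* Sketch only: the request struct and channel info value are
 * placeholders for illustration. */
struct my_request {
	u32 field;
} __packed;

static int hgsmi_send_example(struct gen_pool *ctx, u32 arg)
{
	struct my_request *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       MY_CHANNEL_INFO);
	if (!p)
		return -ENOMEM;

	p->field = arg;			/* fill in the request */
	hgsmi_buffer_submit(ctx, p);	/* host processes it synchronously */
	/* read any reply fields from *p here, before freeing */
	hgsmi_buffer_free(ctx, p);

	return 0;
}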
+
+/**
+ * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * @param ctx the context of the guest heap to use.
+ * @param caps the capabilities to report, see vbva_caps.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
+{
+ struct vbva_caps *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
+ if (!p)
+ return -ENOMEM;
+
+ p->rc = VERR_NOT_IMPLEMENTED;
+ p->caps = caps;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ WARN_ON_ONCE(RT_FAILURE(p->rc));
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+int hgsmi_test_query_conf(struct gen_pool *ctx)
+{
+ u32 value = 0;
+ int ret;
+
+ ret = hgsmi_query_conf(ctx, U32_MAX, &value);
+ if (ret)
+ return ret;
+
+ return value == U32_MAX ? 0 : -EIO;
+}
+
+/**
+ * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * @param ctx the context containing the heap used
+ * @param index the index of the parameter to query,
+ * @see vbva_conf32::index
+ * @param value_ret where to store the value of the parameter on success
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
+{
+ struct vbva_conf32 *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_QUERY_CONF32);
+ if (!p)
+ return -ENOMEM;
+
+ p->index = index;
+ p->value = U32_MAX;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ *value_ret = p->value;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Pass the host a new mouse pointer shape via an HGSMI command.
+ *
+ * @param ctx the context containing the heap to be used
+ * @param flags cursor flags, @see VMMDevReqMousePointer::flags
+ * @param hot_x horizontal position of the hot spot
+ * @param hot_y vertical position of the hot spot
+ * @param width width in pixels of the cursor
+ * @param height height in pixels of the cursor
+ * @param pixels pixel data, @see VMMDevReqMousePointer for the format
+ * @param len size in bytes of the pixel data
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+ u32 hot_x, u32 hot_y, u32 width, u32 height,
+ u8 *pixels, u32 len)
+{
+ struct vbva_mouse_pointer_shape *p;
+ u32 pixel_len = 0;
+ int rc;
+
+ if (flags & VBOX_MOUSE_POINTER_SHAPE) {
+ /*
+ * Size of the pointer data:
+	 * sizeof(AND mask) + sizeof(XOR mask)
+ */
+ pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
+ width * 4 * height;
+ if (pixel_len > len)
+ return -EINVAL;
+
+ /*
+		 * If a shape is supplied, always make the pointer visible.
+ * See comments in 'vboxUpdatePointerShape'
+ */
+ flags |= VBOX_MOUSE_POINTER_VISIBLE;
+ }
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+ VBVA_MOUSE_POINTER_SHAPE);
+ if (!p)
+ return -ENOMEM;
+
+ p->result = VINF_SUCCESS;
+ p->flags = flags;
+	p->hot_x = hot_x;
+ p->hot_y = hot_y;
+ p->width = width;
+ p->height = height;
+ if (pixel_len)
+ memcpy(p->data, pixels, pixel_len);
+
+ hgsmi_buffer_submit(ctx, p);
+
+ switch (p->result) {
+ case VINF_SUCCESS:
+ rc = 0;
+ break;
+ case VERR_NO_MEMORY:
+ rc = -ENOMEM;
+ break;
+ case VERR_NOT_SUPPORTED:
+ rc = -EBUSY;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ hgsmi_buffer_free(ctx, p);
+
+ return rc;
+}
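A worked instance of the pixel_len formula above, using the driver's 64x64 maximum cursor size:

/* Illustrative arithmetic: for a 64x64 ARGB cursor the AND mask is
 * ((64 + 7) / 8) * 64 = 512 bytes, which is already 4-byte aligned,
 * and the XOR data is 64 * 4 * 64 = 16384 bytes, so
 * pixel_len = 512 + 16384 = 16896 bytes. */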
+
+/**
+ * Report the guest cursor position. The host may wish to use this information
+ * to re-position its own cursor (though this is currently unlikely). The
+ * current host cursor position is returned.
+ * @param ctx The context containing the heap used.
+ * @param report_position Are we reporting a position?
+ * @param x Guest cursor X position.
+ * @param y Guest cursor Y position.
+ * @param x_host Host cursor X position is stored here. Optional.
+ * @param y_host Host cursor Y position is stored here. Optional.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+ u32 x, u32 y, u32 *x_host, u32 *y_host)
+{
+ struct vbva_cursor_position *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_CURSOR_POSITION);
+ if (!p)
+ return -ENOMEM;
+
+ p->report_position = report_position;
+ p->x = x;
+ p->y = y;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ *x_host = p->x;
+ *y_host = p->y;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * @todo Mouse pointer position to be read from VMMDev memory; the address of
+ * the memory region can be queried from VMMDev via an IOCTL. This VMMDev memory
+ * region will contain host information which is needed by the guest.
+ *
+ * Reading will not cause a switch to the host.
+ *
+ * Have to take into account:
+ * * synchronization: host must write to the memory only from EMT,
+ *    large structures must be read under a flag, which tells the host
+ *    that the guest is currently reading the memory (OWNER flag?).
+ *  * guest writes: maybe allocate a page for the host info and make
+ *    the page read-only for the guest.
+ *  * the information should be available only to additions drivers.
+ * * VMMDev additions driver will inform the host which version of the info
+ * it expects, host must support all versions.
+ */
diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
new file mode 100644
index 000000000000..8e6d9e11a69c
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_CH_SETUP_H__
+#define __HGSMI_CH_SETUP_H__
+
+/*
+ * Tell the host the location of the hgsmi_host_flags structure, where the
+ * host can write information about pending buffers, etc., and which the
+ * guest can poll quickly without needing port I/O.
+ */
+#define HGSMI_CC_HOST_FLAGS_LOCATION 0
+
+struct hgsmi_buffer_location {
+ u32 buf_location;
+ u32 buf_len;
+} __packed;
+
+/* HGSMI setup and configuration data structures. */
+/* host->guest commands pending, should be accessed under FIFO lock only */
+#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u
+/* IRQ is fired, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_IRQ 0x02u
+/* vsync interrupt flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_VSYNC 0x10u
+/** monitor hotplug flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_HOTPLUG 0x20u
+/**
+ * Cursor capability state change flag, should be accessed under
+ * VGAState::lock only. @see vbva_conf32.
+ */
+#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
+
+struct hgsmi_host_flags {
+ /*
+ * Host flags can be accessed and modified in multiple threads
+ * concurrently, e.g. CrOpenGL HGCM and GUI threads when completing
+ * HGSMI 3D and Video Accel respectively, EMT thread when dealing with
+ * HGSMI command processing, etc.
+	 * Besides setting/clearing flags atomically, some flags have their
+ * own special sync restrictions, see comments for flags above.
+ */
+ u32 host_flags;
+ u32 reserved[3];
+} __packed;
+
+#endif
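The host_flags word described above is what the guest polls instead of doing port I/O. A minimal sketch of such a check, assuming the flags block has been mapped and its location reported via hgsmi_report_flags_location():

/* Sketch only; the in-tree equivalent is vbox_get_flags() in vbox_irq.c
 * later in this patch. */
static bool example_irq_pending(const struct hgsmi_host_flags __iomem *flags)
{
	return readl(&flags->host_flags) & HGSMIHOSTFLAGS_IRQ;
}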
diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/staging/vboxvideo/hgsmi_channels.h
new file mode 100644
index 000000000000..a2a34b2167b4
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_channels.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_CHANNELS_H__
+#define __HGSMI_CHANNELS_H__
+
+/*
+ * Each channel has an 8 bit identifier. There are a number of predefined
+ * (hardcoded) channels.
+ *
+ * The HGSMI_CH_HGSMI channel can be used to map a string channel identifier
+ * to a free 16 bit numerical value. Values are allocated in the range
+ * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
+ */
+
+/* A reserved channel value */
+#define HGSMI_CH_RESERVED 0x00
+/* HGCMI: setup and configuration */
+#define HGSMI_CH_HGSMI 0x01
+/* Graphics: VBVA */
+#define HGSMI_CH_VBVA 0x02
+/* Graphics: Seamless with a single guest region */
+#define HGSMI_CH_SEAMLESS 0x03
+/* Graphics: Seamless with separate host windows */
+#define HGSMI_CH_SEAMLESS2 0x04
+/* Graphics: OpenGL HW acceleration */
+#define HGSMI_CH_OPENGL 0x05
+
+/* The first channel index to be used for string mappings (inclusive) */
+#define HGSMI_CH_STRING_FIRST 0x20
+/* The last channel index for string mappings (inclusive) */
+#define HGSMI_CH_STRING_LAST 0xff
+
+#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/staging/vboxvideo/hgsmi_defs.h
new file mode 100644
index 000000000000..5b21fb974d20
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_defs.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_DEFS_H__
+#define __HGSMI_DEFS_H__
+
+/* Buffer sequence type mask. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03
+/* Single buffer, not a part of a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00
+/* The first buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01
+/* A middle buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
+/* The last buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03
+
+/* 16 bytes buffer header. */
+struct hgsmi_buffer_header {
+ u32 data_size; /* Size of data that follows the header. */
+ u8 flags; /* HGSMI_BUFFER_HEADER_F_* */
+ u8 channel; /* The channel the data must be routed to. */
+ u16 channel_info; /* Opaque to the HGSMI, used by the channel. */
+
+ union {
+ /* Opaque placeholder to make the union 8 bytes. */
+ u8 header_data[8];
+
+ /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
+ struct {
+ u32 reserved1; /* A reserved field, initialize to 0. */
+ u32 reserved2; /* A reserved field, initialize to 0. */
+ } buffer;
+
+ /* HGSMI_BUFFER_HEADER_F_SEQ_START */
+ struct {
+ /* Must be the same for all buffers in the sequence. */
+ u32 sequence_number;
+ /* The total size of the sequence. */
+ u32 sequence_size;
+ } sequence_start;
+
+ /*
+ * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
+ * HGSMI_BUFFER_HEADER_F_SEQ_END
+ */
+ struct {
+ /* Must be the same for all buffers in the sequence. */
+ u32 sequence_number;
+ /* Data offset in the entire sequence. */
+ u32 sequence_offset;
+ } sequence_continue;
+ } u;
+} __packed;
+
+/* 8 bytes buffer tail. */
+struct hgsmi_buffer_tail {
+ /* Reserved, must be initialized to 0. */
+ u32 reserved;
+ /*
+ * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
+	 * Computed over the offset, the header and the first 4 bytes of the tail.
+ */
+ u32 checksum;
+} __packed;
+
+/*
+ * The size of the array of channels. Array indexes are u8.
+ * Note: the value must not be changed.
+ */
+#define HGSMI_NUMBER_OF_CHANNELS 0x100
+
+#endif
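Putting the header and tail together, each HGSMI buffer in guest VRAM has a fixed 24-byte overhead (illustrative layout, derived from the struct sizes above):

/*
 * [ hgsmi_buffer_header, 16 bytes ][ payload, data_size bytes ][ tail, 8 bytes ]
 *
 * so a command with a 32-byte payload occupies 16 + 32 + 8 = 56 bytes.
 */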
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/staging/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7616b8aab23a
--- /dev/null
+++ b/drivers/staging/vboxvideo/modesetting.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+
+/**
+ * Set a video mode via an HGSMI request. The views must have been
+ * initialised first using @a VBoxHGSMISendViewInfo, and if the mode is being
+ * set on the first display then it must first be set using registers.
+ * @param ctx The context containing the heap to use
+ * @param display The screen number
+ * @param origin_x     The horizontal displacement relative to the first screen
+ * @param origin_y The vertical displacement relative to the first screen
+ * @param start_offset The offset of the visible area of the framebuffer
+ * relative to the framebuffer start
+ * @param pitch        The offset in bytes between the starts of two adjacent
+ * scan lines in video RAM
+ * @param width The mode width
+ * @param height The mode height
+ * @param bpp The colour depth of the mode
+ * @param flags Flags
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+ s32 origin_x, s32 origin_y, u32 start_offset,
+ u32 pitch, u32 width, u32 height,
+ u16 bpp, u16 flags)
+{
+ struct vbva_infoscreen *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_INFO_SCREEN);
+ if (!p)
+ return;
+
+ p->view_index = display;
+ p->origin_x = origin_x;
+ p->origin_y = origin_y;
+ p->start_offset = start_offset;
+ p->line_size = pitch;
+ p->width = width;
+ p->height = height;
+ p->bits_per_pixel = bpp;
+ p->flags = flags;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+}
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens.
+ * @param ctx The context containing the heap to use.
+ * @param origin_x Upper left X co-ordinate relative to the first screen.
+ * @param origin_y Upper left Y co-ordinate relative to the first screen.
+ * @param width Rectangle width.
+ * @param height Rectangle height.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+ u32 width, u32 height)
+{
+ struct vbva_report_input_mapping *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_REPORT_INPUT_MAPPING);
+ if (!p)
+ return -ENOMEM;
+
+ p->x = origin_x;
+ p->y = origin_y;
+ p->cx = width;
+ p->cy = height;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Get most recent video mode hints.
+ * @param ctx The context containing the heap to use.
+ * @param screens The number of screens to query hints for, starting at 0.
+ * @param hints Array of vbva_modehint structures for receiving the hints.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+ struct vbva_modehint *hints)
+{
+ struct vbva_query_mode_hints *p;
+ size_t size;
+
+ if (WARN_ON(!hints))
+ return -EINVAL;
+
+ size = screens * sizeof(struct vbva_modehint);
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
+ VBVA_QUERY_MODE_HINTS);
+ if (!p)
+ return -ENOMEM;
+
+ p->hints_queried_count = screens;
+ p->hint_structure_guest_size = sizeof(struct vbva_modehint);
+ p->rc = VERR_NOT_SUPPORTED;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ if (RT_FAILURE(p->rc)) {
+ hgsmi_buffer_free(ctx, p);
+ return -EIO;
+ }
+
+ memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
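A hedged usage sketch for the hints query above; vbox->guest_pool, vbox->num_crtcs and vbox->last_mode_hints are the driver fields declared in vbox_drv.h later in this patch:

/* Sketch: refresh the cached mode hints for all CRTCs. */
ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
			   vbox->last_mode_hints);
if (ret)
	DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);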
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
new file mode 100644
index 000000000000..92ae1560a16d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/vt_kern.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+
+int vbox_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, vbox_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static const struct pci_device_id pciidlist[] = {
+ { 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0, 0, 0},
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void vbox_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int vbox_drm_freeze(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true);
+
+ return 0;
+}
+
+static int vbox_drm_thaw(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ drm_mode_config_reset(dev);
+ drm_helper_resume_force_mode(dev);
+ drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);
+
+ return 0;
+}
+
+static int vbox_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = vbox_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+
+ return 0;
+}
+
+static int vbox_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = vbox_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int vbox_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_resume(ddev);
+}
+
+static int vbox_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+
+ return vbox_drm_freeze(ddev);
+}
+
+static int vbox_pm_thaw(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_thaw(ddev);
+}
+
+static int vbox_pm_poweroff(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops vbox_pm_ops = {
+ .suspend = vbox_pm_suspend,
+ .resume = vbox_pm_resume,
+ .freeze = vbox_pm_freeze,
+ .thaw = vbox_pm_thaw,
+ .poweroff = vbox_pm_poweroff,
+ .restore = vbox_pm_resume,
+};
+
+static struct pci_driver vbox_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = vbox_pci_probe,
+ .remove = vbox_pci_remove,
+ .driver.pm = &vbox_pm_ops,
+};
+
+static const struct file_operations vbox_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = vbox_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .read = drm_read,
+};
+
+static int vbox_master_set(struct drm_device *dev,
+ struct drm_file *file_priv, bool from_open)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ /*
+ * We do not yet know whether the new owner can handle hotplug, so we
+ * do not advertise dynamic modes on the first query and send a
+ * tentative hotplug notification after that to see if they query again.
+ */
+ vbox->initial_mode_queried = false;
+
+ mutex_lock(&vbox->hw_mutex);
+ /*
+ * Disable VBVA when someone releases master in case the next person
+	 * tries to do VESA.
+ */
+ /** @todo work out if anyone is likely to and whether it will work. */
+ /*
+ * Update: we also disable it because if the new master does not do
+ * dirty rectangle reporting (e.g. old versions of Plymouth) then at
+ * least the first screen will still be updated. We enable it as soon
+ * as we receive a dirty rectangle report.
+ */
+ vbox_disable_accel(vbox);
+ mutex_unlock(&vbox->hw_mutex);
+
+ return 0;
+}
+
+static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ /* See vbox_master_set() */
+ vbox->initial_mode_queried = false;
+
+ mutex_lock(&vbox->hw_mutex);
+ vbox_disable_accel(vbox);
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_PRIME,
+ .dev_priv_size = 0,
+
+ .load = vbox_driver_load,
+ .unload = vbox_driver_unload,
+ .lastclose = vbox_driver_lastclose,
+ .master_set = vbox_master_set,
+ .master_drop = vbox_master_drop,
+ .set_busid = drm_pci_set_busid,
+
+ .fops = &vbox_fops,
+ .irq_handler = vbox_irq_handler,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_free_object = vbox_gem_free_object,
+ .dumb_create = vbox_dumb_create,
+ .dumb_map_offset = vbox_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = vbox_gem_prime_pin,
+ .gem_prime_unpin = vbox_gem_prime_unpin,
+ .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
+ .gem_prime_vmap = vbox_gem_prime_vmap,
+ .gem_prime_vunmap = vbox_gem_prime_vunmap,
+ .gem_prime_mmap = vbox_gem_prime_mmap,
+};
+
+static int __init vbox_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && vbox_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (vbox_modeset == 0)
+ return -EINVAL;
+
+ return drm_pci_init(&driver, &vbox_pci_driver);
+}
+
+static void __exit vbox_exit(void)
+{
+ drm_pci_exit(&driver, &vbox_pci_driver);
+}
+
+module_init(vbox_init);
+module_exit(vbox_exit);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..4b9302703b36
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.h
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __VBOX_DRV_H__
+#define __VBOX_DRV_H__
+
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_ch_setup.h"
+
+#define DRIVER_NAME "vboxvideo"
+#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
+#define DRIVER_DATE "20130823"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define VBOX_MAX_CURSOR_WIDTH 64
+#define VBOX_MAX_CURSOR_HEIGHT 64
+#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
+#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
+
+#define VBOX_MAX_SCREENS 32
+
+#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
+ VBVA_ADAPTER_INFORMATION_SIZE)
+#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
+#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
+ sizeof(struct hgsmi_host_flags))
+#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
+
+struct vbox_fbdev;
+
+struct vbox_private {
+ struct drm_device *dev;
+
+ u8 __iomem *guest_heap;
+ u8 __iomem *vbva_buffers;
+ struct gen_pool *guest_pool;
+ struct vbva_buf_ctx *vbva_info;
+ bool any_pitch;
+ u32 num_crtcs;
+ /** Amount of available VRAM, including space used for buffers. */
+ u32 full_vram_size;
+ /** Amount of available VRAM, not including space used for buffers. */
+ u32 available_vram_size;
+ /** Array of structures for receiving mode hints. */
+ struct vbva_modehint *last_mode_hints;
+
+ struct vbox_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ } ttm;
+
+ struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
+ /**
+ * We decide whether or not user-space supports display hot-plug
+ * depending on whether they react to a hot-plug event after the initial
+ * mode query.
+ */
+ bool initial_mode_queried;
+ struct work_struct hotplug_work;
+ u32 input_mapping_width;
+ u32 input_mapping_height;
+ /**
+ * Is user-space using an X.Org-style layout of one large frame-buffer
+	 * encompassing all screens, or is the fbdev console active?
+ */
+ bool single_framebuffer;
+ u32 cursor_width;
+ u32 cursor_height;
+ u32 cursor_hot_x;
+ u32 cursor_hot_y;
+ size_t cursor_data_size;
+ u8 cursor_data[CURSOR_DATA_SIZE];
+};
+
+#undef CURSOR_PIXEL_COUNT
+#undef CURSOR_DATA_SIZE
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags);
+void vbox_driver_unload(struct drm_device *dev);
+void vbox_driver_lastclose(struct drm_device *dev);
+
+struct vbox_gem_object;
+
+struct vbox_connector {
+ struct drm_connector base;
+ char name[32];
+ struct vbox_crtc *vbox_crtc;
+ struct {
+ u16 width;
+ u16 height;
+ bool disconnected;
+ } mode_hint;
+};
+
+struct vbox_crtc {
+ struct drm_crtc base;
+ bool blanked;
+ bool disconnected;
+ unsigned int crtc_id;
+ u32 fb_offset;
+ bool cursor_enabled;
+ u16 x_hint;
+ u16 y_hint;
+};
+
+struct vbox_encoder {
+ struct drm_encoder base;
+};
+
+struct vbox_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct vbox_fbdev {
+ struct drm_fb_helper helper;
+ struct vbox_framebuffer afb;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+};
+
+#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
+#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
+#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
+
+int vbox_mode_init(struct drm_device *dev);
+void vbox_mode_fini(struct drm_device *dev);
+
+#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
+#define CRTC_FB(crtc) ((crtc)->primary->fb)
+
+void vbox_enable_accel(struct vbox_private *vbox);
+void vbox_disable_accel(struct vbox_private *vbox);
+void vbox_report_caps(struct vbox_private *vbox);
+
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects);
+
+int vbox_framebuffer_init(struct drm_device *dev,
+ struct vbox_framebuffer *vbox_fb,
+ const struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object *obj);
+
+int vbox_fbdev_init(struct drm_device *dev);
+void vbox_fbdev_fini(struct drm_device *dev);
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
+
+struct vbox_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ struct ttm_place placements[3];
+ int pin_count;
+};
+
+#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
+
+static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vbox_bo, bo);
+}
+
+#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+void vbox_gem_free_object(struct drm_gem_object *obj);
+int vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
+
+int vbox_mm_init(struct vbox_private *vbox);
+void vbox_mm_fini(struct vbox_private *vbox);
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo);
+
+int vbox_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel, struct drm_gem_object **obj);
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int vbox_bo_unpin(struct vbox_bo *bo);
+
+static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
+ if (ret) {
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+static inline void vbox_bo_unreserve(struct vbox_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain);
+int vbox_bo_push_sysram(struct vbox_bo *bo);
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* vbox_prime.c */
+int vbox_gem_prime_pin(struct drm_gem_object *obj);
+void vbox_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table);
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int vbox_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *area);
+
+/* vbox_irq.c */
+int vbox_irq_init(struct vbox_private *vbox);
+void vbox_irq_fini(struct vbox_private *vbox);
+void vbox_report_hotplug(struct vbox_private *vbox);
+irqreturn_t vbox_irq_handler(int irq, void *arg);
+
+/* vbox_hgsmi.c */
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info);
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
+
+static inline void vbox_write_ioport(u16 index, u16 data)
+{
+ outw(index, VBE_DISPI_IOPORT_INDEX);
+ outw(data, VBE_DISPI_IOPORT_DATA);
+}
+
+#endif
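vbox_write_ioport() above drives a standard VBE index/data register pair. A hedged usage example; VBE_DISPI_INDEX_XRES is assumed to be defined in vboxvideo_vbe.h, which is not part of this excerpt:

/* Hypothetical call: select the XRES register, then write 1024 to it. */
vbox_write_ioport(VBE_DISPI_INDEX_XRES, 1024);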
diff --git a/drivers/staging/vboxvideo/vbox_err.h b/drivers/staging/vboxvideo/vbox_err.h
new file mode 100644
index 000000000000..562db8630eb0
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_err.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOX_ERR_H__
+#define __VBOX_ERR_H__
+
+/**
+ * @name VirtualBox virtual-hardware error macros
+ * @{
+ */
+
+#define VINF_SUCCESS 0
+#define VERR_INVALID_PARAMETER (-2)
+#define VERR_INVALID_POINTER (-6)
+#define VERR_NO_MEMORY (-8)
+#define VERR_NOT_IMPLEMENTED (-12)
+#define VERR_INVALID_FUNCTION (-36)
+#define VERR_NOT_SUPPORTED (-37)
+#define VERR_TOO_MUCH_DATA (-42)
+#define VERR_INVALID_STATE (-79)
+#define VERR_OUT_OF_RESOURCES (-80)
+#define VERR_ALREADY_EXISTS (-105)
+#define VERR_INTERNAL_ERROR (-225)
+
+#define RT_SUCCESS_NP(rc) ((int)(rc) >= VINF_SUCCESS)
+#define RT_SUCCESS(rc) (likely(RT_SUCCESS_NP(rc)))
+#define RT_FAILURE(rc) (unlikely(!RT_SUCCESS_NP(rc)))
+
+/** @} */
+
+#endif
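A minimal sketch of how these status codes are typically consumed, mapping a VirtualBox status to a kernel errno (the mapping below is illustrative, not exhaustive):

static int example_vbox_to_errno(int rc)
{
	if (RT_SUCCESS(rc))
		return 0;

	return rc == VERR_NO_MEMORY ? -ENOMEM : -EINVAL;
}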
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
new file mode 100644
index 000000000000..35f6d9f8c203
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_fb.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+#define VBOX_DIRTY_DELAY (HZ / 30)
+/**
+ * Tell the host about dirty rectangles to update.
+ */
+static void vbox_dirty_update(struct vbox_fbdev *fbdev,
+ int x, int y, int width, int height)
+{
+ struct drm_gem_object *obj;
+ struct vbox_bo *bo;
+ int ret = -EBUSY;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
+ struct drm_clip_rect rect;
+
+ obj = fbdev->afb.obj;
+ bo = gem_to_vbox_bo(obj);
+
+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
+ if (drm_can_sleep())
+ ret = vbox_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&fbdev->dirty_lock, flags);
+
+ if (fbdev->y1 < y)
+ y = fbdev->y1;
+ if (fbdev->y2 > y2)
+ y2 = fbdev->y2;
+ if (fbdev->x1 < x)
+ x = fbdev->x1;
+ if (fbdev->x2 > x2)
+ x2 = fbdev->x2;
+
+ if (store_for_later) {
+ fbdev->x1 = x;
+ fbdev->x2 = x2;
+ fbdev->y1 = y;
+ fbdev->y2 = y2;
+ spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+ return;
+ }
+
+ fbdev->x1 = INT_MAX;
+ fbdev->y1 = INT_MAX;
+ fbdev->x2 = 0;
+ fbdev->y2 = 0;
+
+ spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+
+ /*
+ * Not sure why the original code subtracted 1 here, but I will keep
+ * it that way to avoid unnecessary differences.
+ */
+ rect.x1 = x;
+ rect.x2 = x2 + 1;
+ rect.y1 = y;
+ rect.y2 = y2 + 1;
+ vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);
+
+ vbox_bo_unreserve(bo);
+}
+
+#ifdef CONFIG_FB_DEFERRED_IO
+static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ struct vbox_fbdev *fbdev = info->par;
+ unsigned long start, end, min, max;
+ struct page *page;
+ int y1, y2;
+
+ min = ULONG_MAX;
+ max = 0;
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = (max / info->fix.line_length) + 1;
+ DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
+ __func__, y1, info->var.xres, y2 - y1 - 1);
+ vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
+ }
+}
+
+static struct fb_deferred_io vbox_defio = {
+ .delay = VBOX_DIRTY_DELAY,
+ .deferred_io = vbox_deferred_io,
+};
+#endif
+
+static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_fillrect(info, rect);
+ vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_copyarea(info, area);
+ vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height);
+}
+
+static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_imageblit(info, image);
+ vbox_dirty_update(fbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops vboxfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = vbox_fillrect,
+ .fb_copyarea = vbox_copyarea,
+ .fb_imageblit = vbox_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int vboxfb_create_object(struct vbox_fbdev *fbdev,
+ struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = fbdev->helper.dev;
+ u32 size;
+ struct drm_gem_object *gobj;
+ u32 pitch = mode_cmd->pitches[0];
+ int ret;
+
+ size = pitch * mode_cmd->height;
+ ret = vbox_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+
+ return 0;
+}
+
+static int vboxfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct vbox_fbdev *fbdev =
+ container_of(helper, struct vbox_fbdev, helper);
+ struct drm_device *dev = fbdev->helper.dev;
+ struct DRM_MODE_FB_CMD mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ struct device *device = &dev->pdev->dev;
+ struct drm_gem_object *gobj;
+ struct vbox_bo *bo;
+ int size, ret;
+ u32 pitch;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ mode_cmd.pitches[0] = pitch;
+
+ size = pitch * mode_cmd.height;
+
+ ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ bo = gem_to_vbox_bo(gobj);
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ vbox_bo_unreserve(bo);
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ vbox_bo_unreserve(bo);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon\n");
+ return ret;
+ }
+
+ info = framebuffer_alloc(0, device);
+ if (!info)
+ return -ENOMEM;
+ info->par = fbdev;
+
+ fbdev->size = size;
+
+ fb = &fbdev->afb.base;
+ fbdev->helper.fb = fb;
+ fbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "vboxdrmfb");
+
+ /*
+ * The last flag forces a mode set on VT switches even if the kernel
+ * does not think it is needed.
+ */
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
+ FBINFO_MISC_ALWAYS_SETPAR;
+ info->fbops = &vboxfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret)
+ return -ENOMEM;
+
+ /*
+	 * This seems to be done as a safety check, to ensure that the
+	 * framebuffer is not registered twice by different drivers.
+ */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures)
+ return -ENOMEM;
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+ info->fbdefio = &vbox_defio;
+ fb_deferred_io_init(info);
+#endif
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
+
+ return 0;
+}
+
+static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ *red = regno;
+ *green = regno;
+ *blue = regno;
+}
+
+static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
+ .gamma_set = vbox_fb_gamma_set,
+ .gamma_get = vbox_fb_gamma_get,
+ .fb_probe = vboxfb_create,
+};
+
+void vbox_fbdev_fini(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_fbdev *fbdev = vbox->fbdev;
+ struct vbox_framebuffer *afb = &fbdev->afb;
+
+ drm_fb_helper_unregister_fbi(&fbdev->helper);
+
+ if (afb->obj) {
+ struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
+
+ if (!vbox_bo_reserve(bo, false)) {
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+ /*
+ * QXL does this, but is it really needed before
+ * freeing?
+ */
+ if (bo->pin_count)
+ vbox_bo_unpin(bo);
+ vbox_bo_unreserve(bo);
+ }
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&fbdev->helper);
+
+ drm_framebuffer_unregister_private(&afb->base);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int vbox_fbdev_init(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_fbdev *fbdev;
+ int ret;
+
+ fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return -ENOMEM;
+
+ vbox->fbdev = fbdev;
+ spin_lock_init(&fbdev->dirty_lock);
+
+ drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
+ ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
+ if (ret)
+ return ret;
+
+ ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
+ if (ret)
+ goto err_fini;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
+ if (ret)
+ goto err_fini;
+
+ return 0;
+
+err_fini:
+ drm_fb_helper_fini(&fbdev->helper);
+ return ret;
+}
+
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
+{
+ struct fb_info *fbdev = vbox->fbdev->helper.fbdev;
+
+ fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
+ fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
+}
diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/staging/vboxvideo/vbox_hgsmi.c
new file mode 100644
index 000000000000..822fd31121cb
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_hgsmi.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include "vbox_drv.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_defs.h"
+
+/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
+static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
+{
+ while (size--) {
+ hash += *data++;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ return hash;
+}
+
+static u32 hgsmi_hash_end(u32 hash)
+{
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
+/* Not really a checksum but that is the naming used in all vbox code */
+static u32 hgsmi_checksum(u32 offset,
+ const struct hgsmi_buffer_header *header,
+ const struct hgsmi_buffer_tail *tail)
+{
+ u32 checksum;
+
+ checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
+ checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
+ /* 4 -> Do not checksum the checksum itself */
+ checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
+
+ return hgsmi_hash_end(checksum);
+}
+
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info)
+{
+ struct hgsmi_buffer_header *h;
+ struct hgsmi_buffer_tail *t;
+ size_t total_size;
+ dma_addr_t offset;
+
+ total_size = size + sizeof(*h) + sizeof(*t);
+ h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
+ if (!h)
+ return NULL;
+
+ t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
+
+ h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
+ h->data_size = size;
+ h->channel = channel;
+ h->channel_info = channel_info;
+ memset(&h->u.header_data, 0, sizeof(h->u.header_data));
+
+ t->reserved = 0;
+ t->checksum = hgsmi_checksum(offset, h, t);
+
+ return (u8 *)h + sizeof(*h);
+}
+
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
+{
+ struct hgsmi_buffer_header *h =
+ (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
+ size_t total_size = h->data_size + sizeof(*h) +
+ sizeof(struct hgsmi_buffer_tail);
+
+ gen_pool_free(guest_pool, (unsigned long)h, total_size);
+}
+
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
+{
+ phys_addr_t offset;
+
+ offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
+ sizeof(struct hgsmi_buffer_header));
+ outl(offset, VGA_PORT_HGSMI_GUEST);
+ /* Full memory barrier: the host has changed memory behind our back. */
+ mb();
+
+ return 0;
+}
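
Taken together, these three helpers form the HGSMI request lifecycle used throughout the driver: allocate a typed buffer from the guest pool, fill in the payload, submit it to the host, then free it. A minimal caller sketch, assuming a hypothetical payload struct and channel-info constant (the real pattern appears in vbox_set_view() in vbox_mode.c):

static int example_hgsmi_request(struct gen_pool *guest_pool)
{
	struct my_payload *p;	/* hypothetical payload layout */

	p = hgsmi_buffer_alloc(guest_pool, sizeof(*p),
			       HGSMI_CH_VBVA, MY_CHANNEL_INFO);
	if (!p)
		return -ENOMEM;

	/* Fill in the payload fields before handing the buffer over. */
	hgsmi_buffer_submit(guest_pool, p);
	hgsmi_buffer_free(guest_pool, p);

	return 0;
}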
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
new file mode 100644
index 000000000000..3ca8bec62ac4
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2016-2017 Oracle Corporation
+ * This file is based on qxl_irq.c
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+static void vbox_clear_irq(void)
+{
+ outl((u32)~0, VGA_PORT_HGSMI_HOST);
+}
+
+static u32 vbox_get_flags(struct vbox_private *vbox)
+{
+ return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
+}
+
+void vbox_report_hotplug(struct vbox_private *vbox)
+{
+ schedule_work(&vbox->hotplug_work);
+}
+
+irqreturn_t vbox_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+ u32 host_flags = vbox_get_flags(vbox);
+
+ if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
+ return IRQ_NONE;
+
+ /*
+ * Due to a bug in the initial host implementation of hot-plug irqs,
+ * the hot-plug and cursor capability flags were never cleared.
+ * Fortunately we can tell when they would have been set by checking
+ * that the VSYNC flag is not set.
+ */
+ if (host_flags &
+ (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
+ !(host_flags & HGSMIHOSTFLAGS_VSYNC))
+ vbox_report_hotplug(vbox);
+
+ vbox_clear_irq();
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Check that the position hints provided by the host are suitable for GNOME
+ * shell (i.e. all screens disjoint, with hints for all enabled screens), and
+ * if not, replace them with default ones. Providing valid hints improves the
+ * chances that we will get a known screen layout for pointer mapping.
+ */
+static void validate_or_set_position_hints(struct vbox_private *vbox)
+{
+ struct vbva_modehint *hintsi, *hintsj;
+ bool valid = true;
+ u16 currentx = 0;
+ int i, j;
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ for (j = 0; j < i; ++j) {
+ hintsi = &vbox->last_mode_hints[i];
+ hintsj = &vbox->last_mode_hints[j];
+
+ if (hintsi->enabled && hintsj->enabled) {
+ if (hintsi->dx >= 0xffff ||
+ hintsi->dy >= 0xffff ||
+ hintsj->dx >= 0xffff ||
+ hintsj->dy >= 0xffff ||
+ (hintsi->dx <
+ hintsj->dx + (hintsj->cx & 0x8fff) &&
+ hintsi->dx + (hintsi->cx & 0x8fff) >
+ hintsj->dx) ||
+ (hintsi->dy <
+ hintsj->dy + (hintsj->cy & 0x8fff) &&
+ hintsi->dy + (hintsi->cy & 0x8fff) >
+ hintsj->dy))
+ valid = false;
+ }
+ }
+ }
+ if (!valid)
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ if (vbox->last_mode_hints[i].enabled) {
+ vbox->last_mode_hints[i].dx = currentx;
+ vbox->last_mode_hints[i].dy = 0;
+ currentx +=
+ vbox->last_mode_hints[i].cx & 0x8fff;
+ }
+ }
+}
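
The pairwise test above is the standard axis-aligned rectangle intersection check applied per axis, with coordinates of 0xffff treated as "unset" and therefore invalid. A standalone sketch of the same predicate, with illustrative names that are not part of this file:

#include <linux/types.h>

/* True if the half-open ranges [x1, x1 + w1) and [x2, x2 + w2) intersect. */
static bool ranges_overlap(u16 x1, u16 w1, u16 x2, u16 w2)
{
	return x1 < x2 + w2 && x1 + w1 > x2;
}

Two screens collide exactly when ranges_overlap() holds for both their x extents and their y extents, which is the condition the nested loops reject.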
+
+/**
+ * Query the host for the most recent video mode hints.
+ */
+static void vbox_update_mode_hints(struct vbox_private *vbox)
+{
+ struct drm_device *dev = vbox->dev;
+ struct drm_connector *connector;
+ struct vbox_connector *vbox_conn;
+ struct vbva_modehint *hints;
+ u16 flags;
+ bool disconnected;
+ unsigned int crtc_id;
+ int ret;
+
+ ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
+ vbox->last_mode_hints);
+ if (ret) {
+ DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
+ return;
+ }
+
+ validate_or_set_position_hints(vbox);
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ vbox_conn = to_vbox_connector(connector);
+
+ hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
+ if (hints->magic != VBVAMODEHINT_MAGIC)
+ continue;
+
+ disconnected = !(hints->enabled);
+ crtc_id = vbox_conn->vbox_crtc->crtc_id;
+ vbox_conn->mode_hint.width = hints->cx & 0x8fff;
+ vbox_conn->mode_hint.height = hints->cy & 0x8fff;
+ vbox_conn->vbox_crtc->x_hint = hints->dx;
+ vbox_conn->vbox_crtc->y_hint = hints->dy;
+ vbox_conn->mode_hint.disconnected = disconnected;
+
+ if (vbox_conn->vbox_crtc->disconnected == disconnected)
+ continue;
+
+ if (disconnected)
+ flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
+ else
+ flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;
+
+ hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
+ hints->cx * 4, hints->cx,
+ hints->cy, 0, flags);
+
+ vbox_conn->vbox_crtc->disconnected = disconnected;
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static void vbox_hotplug_worker(struct work_struct *work)
+{
+ struct vbox_private *vbox = container_of(work, struct vbox_private,
+ hotplug_work);
+
+ vbox_update_mode_hints(vbox);
+ drm_kms_helper_hotplug_event(vbox->dev);
+}
+
+int vbox_irq_init(struct vbox_private *vbox)
+{
+ INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
+ vbox_update_mode_hints(vbox);
+
+ return drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
+}
+
+void vbox_irq_fini(struct vbox_private *vbox)
+{
+ drm_irq_uninstall(vbox->dev);
+ flush_work(&vbox->hotplug_work);
+}
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
new file mode 100644
index 000000000000..d0c6ec75a3c7
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_main.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>,
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+
+static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
+
+ if (vbox_fb->obj)
+ drm_gem_object_unreference_unlocked(vbox_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+void vbox_enable_accel(struct vbox_private *vbox)
+{
+ unsigned int i;
+ struct vbva_buffer *vbva;
+
+ if (!vbox->vbva_info || !vbox->vbva_buffers) {
+ /* Should never happen... */
+ DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
+ return;
+ }
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ if (vbox->vbva_info[i].vbva)
+ continue;
+
+ vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
+ if (!vbva_enable(&vbox->vbva_info[i],
+ vbox->guest_pool, vbva, i)) {
+ /* Very old host or a driver error. */
+ DRM_ERROR("vboxvideo: vbva_enable failed\n");
+ return;
+ }
+ }
+}
+
+void vbox_disable_accel(struct vbox_private *vbox)
+{
+ unsigned int i;
+
+ for (i = 0; i < vbox->num_crtcs; ++i)
+ vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
+}
+
+void vbox_report_caps(struct vbox_private *vbox)
+{
+ u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
+ VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
+
+ if (vbox->initial_mode_queried)
+ caps |= VBVACAPS_VIDEO_MODE_HINTS;
+
+ hgsmi_send_caps_info(vbox->guest_pool, caps);
+}
+
+/**
+ * Send information about dirty rectangles to VBVA. If necessary, enable
+ * VBVA first: it is normally disabled after a change of master, in case the
+ * new master does not send dirty rectangle information (is that even
+ * allowed?).
+ */
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ mutex_lock(&vbox->hw_mutex);
+ list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
+ if (CRTC_FB(crtc) != fb)
+ continue;
+
+ vbox_enable_accel(vbox);
+
+ for (i = 0; i < num_rects; ++i) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
+ if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
+ (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
+ (rects[i].x2 < crtc->x) ||
+ (rects[i].y2 < crtc->y))
+ continue;
+
+ cmd_hdr.x = (s16)rects[i].x1;
+ cmd_hdr.y = (s16)rects[i].y1;
+ cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
+ cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+ }
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs vbox_fb_funcs = {
+ .destroy = vbox_user_framebuffer_destroy,
+ .dirty = vbox_user_framebuffer_dirty,
+};
+
+int vbox_framebuffer_init(struct drm_device *dev,
+ struct vbox_framebuffer *vbox_fb,
+ const struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
+ vbox_fb->obj = obj;
+ ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct drm_framebuffer *vbox_user_framebuffer_create(
+ struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ int ret = -ENOMEM;
+
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
+ if (!vbox_fb)
+ goto err_unref_obj;
+
+ ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
+ if (ret)
+ goto err_free_vbox_fb;
+
+ return &vbox_fb->base;
+
+err_free_vbox_fb:
+ kfree(vbox_fb);
+err_unref_obj:
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(ret);
+}
+
+static const struct drm_mode_config_funcs vbox_mode_funcs = {
+ .fb_create = vbox_user_framebuffer_create,
+};
+
+static int vbox_accel_init(struct vbox_private *vbox)
+{
+ unsigned int i;
+
+ vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+ sizeof(*vbox->vbva_info), GFP_KERNEL);
+ if (!vbox->vbva_info)
+ return -ENOMEM;
+
+ /* Take a command buffer for each screen from the end of usable VRAM. */
+ vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
+
+ vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
+ vbox->available_vram_size,
+ vbox->num_crtcs *
+ VBVA_MIN_BUFFER_SIZE);
+ if (!vbox->vbva_buffers)
+ return -ENOMEM;
+
+ for (i = 0; i < vbox->num_crtcs; ++i)
+ vbva_setup_buffer_context(&vbox->vbva_info[i],
+ vbox->available_vram_size +
+ i * VBVA_MIN_BUFFER_SIZE,
+ VBVA_MIN_BUFFER_SIZE);
+
+ return 0;
+}
+
+static void vbox_accel_fini(struct vbox_private *vbox)
+{
+ vbox_disable_accel(vbox);
+ pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
+}
+
+/** Do we support the 4.3 plus mode hint reporting interface? */
+static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
+{
+ u32 have_hints, have_cursor;
+ int ret;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
+ &have_hints);
+ if (ret)
+ return false;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
+ &have_cursor);
+ if (ret)
+ return false;
+
+ return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
+}
+
+static bool vbox_check_supported(u16 id)
+{
+ u16 dispi_id;
+
+ vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
+ dispi_id = inw(VBE_DISPI_IOPORT_DATA);
+
+ return dispi_id == id;
+}
+
+/**
+ * Set up our heaps and data exchange buffers in VRAM before handing the rest
+ * to the memory manager.
+ */
+static int vbox_hw_init(struct vbox_private *vbox)
+{
+ int ret = -ENOMEM;
+
+ vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
+ vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
+
+ DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
+
+ /* Map guest-heap at end of vram */
+ vbox->guest_heap =
+ pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_SIZE);
+ if (!vbox->guest_heap)
+ return -ENOMEM;
+
+ /* Create the guest-heap memory pool, using 2^4 = 16-byte chunks */
+ vbox->guest_pool = gen_pool_create(4, -1);
+ if (!vbox->guest_pool)
+ goto err_unmap_guest_heap;
+
+ ret = gen_pool_add_virt(vbox->guest_pool,
+ (unsigned long)vbox->guest_heap,
+ GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_USABLE_SIZE, -1);
+ if (ret)
+ goto err_destroy_guest_pool;
+
+ ret = hgsmi_test_query_conf(vbox->guest_pool);
+ if (ret) {
+ DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
+ goto err_destroy_guest_pool;
+ }
+
+ /* Reduce available VRAM size to reflect the guest heap. */
+ vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
+ /* Linux drm represents monitors as a 32-bit array. */
+ hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
+ &vbox->num_crtcs);
+ vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);
+
+ if (!have_hgsmi_mode_hints(vbox)) {
+ ret = -ENOTSUPP;
+ goto err_destroy_guest_pool;
+ }
+
+ vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+ sizeof(struct vbva_modehint),
+ GFP_KERNEL);
+ if (!vbox->last_mode_hints) {
+ ret = -ENOMEM;
+ goto err_destroy_guest_pool;
+ }
+
+ ret = vbox_accel_init(vbox);
+ if (ret)
+ goto err_destroy_guest_pool;
+
+ return 0;
+
+err_destroy_guest_pool:
+ gen_pool_destroy(vbox->guest_pool);
+err_unmap_guest_heap:
+ pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
+ return ret;
+}
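
A note on granularity, which follows from the gen_pool API rather than from anything vboxvideo-specific: gen_pool_create(4, -1) sets the minimum allocation order to 4, so every hgsmi_buffer_alloc() is rounded up to a multiple of 2^4 = 16 bytes. A request whose header, payload and tail total 44 bytes, for example, occupies 48 bytes of guest heap.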
+
+static void vbox_hw_fini(struct vbox_private *vbox)
+{
+ vbox_accel_fini(vbox);
+ gen_pool_destroy(vbox->guest_pool);
+ pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
+}
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct vbox_private *vbox;
+ int ret = 0;
+
+ if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
+ return -ENODEV;
+
+ vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
+ if (!vbox)
+ return -ENOMEM;
+
+ dev->dev_private = vbox;
+ vbox->dev = dev;
+
+ mutex_init(&vbox->hw_mutex);
+
+ ret = vbox_hw_init(vbox);
+ if (ret)
+ return ret;
+
+ ret = vbox_mm_init(vbox);
+ if (ret)
+ goto err_hw_fini;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&vbox_mode_funcs;
+ dev->mode_config.min_width = 64;
+ dev->mode_config.min_height = 64;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
+ dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
+
+ ret = vbox_mode_init(dev);
+ if (ret)
+ goto err_drm_mode_cleanup;
+
+ ret = vbox_irq_init(vbox);
+ if (ret)
+ goto err_mode_fini;
+
+ ret = vbox_fbdev_init(dev);
+ if (ret)
+ goto err_irq_fini;
+
+ return 0;
+
+err_irq_fini:
+ vbox_irq_fini(vbox);
+err_mode_fini:
+ vbox_mode_fini(dev);
+err_drm_mode_cleanup:
+ drm_mode_config_cleanup(dev);
+ vbox_mm_fini(vbox);
+err_hw_fini:
+ vbox_hw_fini(vbox);
+ return ret;
+}
+
+void vbox_driver_unload(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ vbox_fbdev_fini(dev);
+ vbox_irq_fini(vbox);
+ vbox_mode_fini(dev);
+ drm_mode_config_cleanup(dev);
+ vbox_mm_fini(vbox);
+ vbox_hw_fini(vbox);
+}
+
+/**
+ * @note This is described in the DRM framework documentation. The AST driver
+ * does not implement it, but we get an oops on driver unload if it is absent.
+ */
+void vbox_driver_lastclose(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ if (vbox->fbdev)
+ drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
+}
+
+int vbox_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel, struct drm_gem_object **obj)
+{
+ struct vbox_bo *vboxbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+
+ *obj = &vboxbo->gem;
+
+ return 0;
+}
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = vbox_gem_create(dev, args->size, false, &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
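
To make the arithmetic concrete: a 1024x768 dumb buffer at bpp = 32 gets pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and size = 4096 * 768 = 3 MiB, which vbox_gem_create() then rounds up to a page multiple (a no-op in this case). The (bpp + 7) / 8 term rounds sub-byte depths up to whole bytes.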
+
+static void vbox_bo_unref(struct vbox_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (!tbo)
+ *bo = NULL;
+}
+
+void vbox_gem_free_object(struct drm_gem_object *obj)
+{
+ struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
+
+ vbox_bo_unref(&vbox_bo);
+}
+
+static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct vbox_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_vbox_bo(obj);
+ *offset = vbox_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
new file mode 100644
index 000000000000..f2b85f3256fa
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_mode.c
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/export.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+#include "hgsmi_channels.h"
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ u32 handle, u32 width, u32 height,
+ s32 hot_x, s32 hot_y);
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+/**
+ * Set a graphics mode. Poke any required values into registers, do an HGSMI
+ * mode set and tell the host we support advanced graphics functions.
+ */
+static void vbox_do_modeset(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox;
+ int width, height, bpp, pitch;
+ unsigned int crtc_id;
+ u16 flags;
+ s32 x_offset, y_offset;
+
+ vbox = crtc->dev->dev_private;
+ width = mode->hdisplay ? mode->hdisplay : 640;
+ height = mode->vdisplay ? mode->vdisplay : 480;
+ crtc_id = vbox_crtc->crtc_id;
+ bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
+ pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
+ x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
+ y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;
+
+ /*
+ * This is the old way of setting graphics modes. It assumed one screen
+ * and a frame-buffer at the start of video RAM. On older versions of
+ * VirtualBox, certain parts of the code still assume that the first
+ * screen is programmed this way, so try to fake it.
+ */
+ if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
+ vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
+ vbox_crtc->fb_offset % (bpp / 8) == 0) {
+ vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
+ vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
+ vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
+ vbox_write_ioport(VBE_DISPI_INDEX_BPP,
+ CRTC_FB(crtc)->format->cpp[0] * 8);
+ vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
+ vbox_write_ioport(
+ VBE_DISPI_INDEX_X_OFFSET,
+ vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x);
+ vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
+ vbox_crtc->fb_offset / pitch + crtc->y);
+ }
+
+ flags = VBVA_SCREEN_F_ACTIVE;
+ flags |= (crtc->enabled && !vbox_crtc->blanked) ?
+ 0 : VBVA_SCREEN_F_BLANK;
+ flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
+ hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
+ x_offset, y_offset,
+ crtc->x * bpp / 8 + crtc->y * pitch,
+ pitch, width, height,
+ vbox_crtc->blanked ? 0 : bpp, flags);
+}
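
To ground the legacy path: with a 1024x768 mode at 32 bpp and a pitch of 4096 bytes, VBE_DISPI_INDEX_VIRT_WIDTH is programmed as pitch * 8 / bpp = 4096 * 8 / 32 = 1024 pixels, and a frame-buffer starting 4096 * 768 bytes into VRAM yields a Y offset of fb_offset / pitch = 768 (plus crtc->y).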
+
+static int vbox_set_view(struct drm_crtc *crtc)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbva_infoview *p;
+
+ /*
+ * Tell the host about the view. This design originally targeted the
+ * Windows XP driver architecture and assumed that each screen would
+ * have a dedicated frame buffer with the command buffer following it,
+ * the whole being a "view". The host works out which screen a command
+ * buffer belongs to by checking whether it is in the first view, then
+ * whether it is in the second and so on. The first match wins. We
+ * cheat around this by making the first view be the managed memory
+ * plus the first command buffer, the second the same plus the second
+ * buffer and so on.
+ */
+ p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
+ HGSMI_CH_VBVA, VBVA_INFO_VIEW);
+ if (!p)
+ return -ENOMEM;
+
+ p->view_index = vbox_crtc->crtc_id;
+ p->view_offset = vbox_crtc->fb_offset;
+ p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
+ vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
+ p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
+
+ hgsmi_buffer_submit(vbox->guest_pool, p);
+ hgsmi_buffer_free(vbox->guest_pool, p);
+
+ return 0;
+}
+
+static void vbox_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vbox_crtc->blanked = false;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vbox_crtc->blanked = true;
+ break;
+ }
+
+ mutex_lock(&vbox->hw_mutex);
+ vbox_do_modeset(crtc, &crtc->hwmode);
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/*
+ * Try to map the layout of virtual screens to the range of the input device.
+ * Return true if we need to re-set the crtc modes due to screen offset
+ * changes.
+ */
+static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
+{
+ struct drm_crtc *crtci;
+ struct drm_connector *connectori;
+ struct drm_framebuffer *fb1 = NULL;
+ bool single_framebuffer = true;
+ bool old_single_framebuffer = vbox->single_framebuffer;
+ u16 width = 0, height = 0;
+
+ /*
+ * Are we using an X.Org-style single large frame-buffer for all crtcs?
+ * If so then screen layout can be deduced from the crtc offsets.
+ * We fall back to the same assumption if this is the fbdev frame-buffer.
+ */
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
+ if (!fb1) {
+ fb1 = CRTC_FB(crtci);
+ if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
+ break;
+ } else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
+ single_framebuffer = false;
+ }
+ }
+ if (single_framebuffer) {
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ if (to_vbox_crtc(crtci)->crtc_id != 0)
+ continue;
+
+ vbox->single_framebuffer = true;
+ vbox->input_mapping_width = CRTC_FB(crtci)->width;
+ vbox->input_mapping_height = CRTC_FB(crtci)->height;
+ return old_single_framebuffer !=
+ vbox->single_framebuffer;
+ }
+ }
+ /* Otherwise calculate the total span of all screens. */
+ list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
+ head) {
+ struct vbox_connector *vbox_connector =
+ to_vbox_connector(connectori);
+ struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
+
+ width = max_t(u16, width, vbox_crtc->x_hint +
+ vbox_connector->mode_hint.width);
+ height = max_t(u16, height, vbox_crtc->y_hint +
+ vbox_connector->mode_hint.height);
+ }
+
+ vbox->single_framebuffer = false;
+ vbox->input_mapping_width = width;
+ vbox->input_mapping_height = height;
+
+ return old_single_framebuffer != vbox->single_framebuffer;
+}
+
+static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *old_fb, int x, int y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ struct vbox_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* Unpin the previous fb. */
+ if (old_fb) {
+ vbox_fb = to_vbox_framebuffer(old_fb);
+ obj = vbox_fb->obj;
+ bo = gem_to_vbox_bo(obj);
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ vbox_bo_unpin(bo);
+ vbox_bo_unreserve(bo);
+ }
+
+ vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
+ obj = vbox_fb->obj;
+ bo = gem_to_vbox_bo(obj);
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ vbox_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&vbox->fbdev->afb == vbox_fb)
+ vbox_fbdev_set_base(vbox, gpu_addr);
+ vbox_bo_unreserve(bo);
+
+ /* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
+ vbox_crtc->fb_offset = gpu_addr;
+ if (vbox_set_up_input_mapping(vbox)) {
+ struct drm_crtc *crtci;
+
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ vbox_set_view(crtc);
+ vbox_do_modeset(crtci, &crtci->mode);
+ }
+ }
+
+ return 0;
+}
+
+static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return vbox_crtc_do_set_base(crtc, old_fb, x, y);
+}
+
+static int vbox_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ int ret;
+
+ vbox_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ mutex_lock(&vbox->hw_mutex);
+ ret = vbox_set_view(crtc);
+ if (!ret)
+ vbox_do_modeset(crtc, mode);
+ hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
+ mutex_unlock(&vbox->hw_mutex);
+
+ return ret;
+}
+
+static void vbox_crtc_disable(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
+ .dpms = vbox_crtc_dpms,
+ .mode_fixup = vbox_crtc_mode_fixup,
+ .mode_set = vbox_crtc_mode_set,
+ /* .mode_set_base = vbox_crtc_mode_set_base, */
+ .disable = vbox_crtc_disable,
+ .load_lut = vbox_crtc_load_lut,
+ .prepare = vbox_crtc_prepare,
+ .commit = vbox_crtc_commit,
+};
+
+static void vbox_crtc_reset(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs vbox_crtc_funcs = {
+ .cursor_move = vbox_cursor_move,
+ .cursor_set2 = vbox_cursor_set2,
+ .reset = vbox_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ /* .gamma_set = vbox_crtc_gamma_set, */
+ .destroy = vbox_crtc_destroy,
+};
+
+static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
+{
+ struct vbox_crtc *vbox_crtc;
+
+ vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
+ if (!vbox_crtc)
+ return NULL;
+
+ vbox_crtc->crtc_id = i;
+
+ drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
+ drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
+
+ return vbox_crtc;
+}
+
+static void vbox_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+
+ /* Pick the first (and only) encoder id. */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+
+ return NULL;
+}
+
+static const struct drm_encoder_funcs vbox_enc_funcs = {
+ .destroy = vbox_encoder_destroy,
+};
+
+static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool vbox_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void vbox_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void vbox_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void vbox_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
+ .dpms = vbox_encoder_dpms,
+ .mode_fixup = vbox_mode_fixup,
+ .prepare = vbox_encoder_prepare,
+ .commit = vbox_encoder_commit,
+ .mode_set = vbox_encoder_mode_set,
+};
+
+static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
+ unsigned int i)
+{
+ struct vbox_encoder *vbox_encoder;
+
+ vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
+ if (!vbox_encoder)
+ return NULL;
+
+ drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
+
+ vbox_encoder->base.possible_crtcs = 1 << i;
+ return &vbox_encoder->base;
+}
+
+/**
+ * Generate EDID data with a mode-unique serial number for the virtual
+ * monitor to try to persuade Unity that different modes correspond to
+ * different monitors and it should not try to force the same resolution on
+ * them.
+ */
+static void vbox_set_edid(struct drm_connector *connector, int width,
+ int height)
+{
+ enum { EDID_SIZE = 128 };
+ unsigned char edid[EDID_SIZE] = {
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
+ 0x58, 0x58, /* manufacturer (VBX) */
+ 0x00, 0x00, /* product code */
+ 0x00, 0x00, 0x00, 0x00, /* serial number goes here */
+ 0x01, /* week of manufacture */
+ 0x00, /* year of manufacture */
+ 0x01, 0x03, /* EDID version */
+ 0x80, /* capabilities - digital */
+ 0x00, /* horiz. res in cm, zero for projectors */
+ 0x00, /* vert. res in cm */
+ 0x78, /* display gamma (120 == 2.2). */
+ 0xEE, /* features (standby, suspend, off, RGB, std */
+ /* colour space, preferred timing mode) */
+ 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
+ /* chromaticity for standard colour space. */
+ 0x00, 0x00, 0x00, /* no default timings */
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, /* no standard timings */
+ 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
+ 0x02, 0x02,
+ /* descriptor block 1 goes below */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* descriptor block 2, monitor ranges */
+ 0x00, 0x00, 0x00, 0xFD, 0x00,
+ 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20,
+ /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
+ 0x20,
+ /* descriptor block 3, monitor name */
+ 0x00, 0x00, 0x00, 0xFC, 0x00,
+ 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
+ '\n',
+ /* descriptor block 4: dummy data */
+ 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20,
+ 0x00, /* number of extensions */
+ 0x00 /* checksum goes here */
+ };
+ int clock = (width + 6) * (height + 6) * 60 / 10000;
+ unsigned int i, sum = 0;
+
+ edid[12] = width & 0xff;
+ edid[13] = width >> 8;
+ edid[14] = height & 0xff;
+ edid[15] = height >> 8;
+ edid[54] = clock & 0xff;
+ edid[55] = clock >> 8;
+ edid[56] = width & 0xff;
+ edid[58] = (width >> 4) & 0xf0;
+ edid[59] = height & 0xff;
+ edid[61] = (height >> 4) & 0xf0;
+ for (i = 0; i < EDID_SIZE - 1; ++i)
+ sum += edid[i];
+ edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
+ drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
+}
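
The final byte enforces the EDID 1.x rule that all 128 bytes of a block sum to zero modulo 256. A minimal verification sketch under that assumption (the helper name is illustrative, not part of the driver):

#include <linux/types.h>

/* Returns true if a 128-byte EDID block has a valid checksum. */
static bool edid_sum_ok(const u8 *edid)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < 128; i++)
		sum += edid[i];	/* u8 arithmetic wraps modulo 256 */

	return sum == 0;
}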
+
+static int vbox_get_modes(struct drm_connector *connector)
+{
+ struct vbox_connector *vbox_connector = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct vbox_private *vbox = NULL;
+ unsigned int num_modes = 0;
+ int preferred_width, preferred_height;
+
+ vbox_connector = to_vbox_connector(connector);
+ vbox = connector->dev->dev_private;
+ /*
+ * Heuristic: we do not want to tell the host that we support dynamic
+ * resizing unless we feel confident that the user space client using
+ * the video driver can handle hot-plug events. So the first time modes
+ * are queried after a "master" switch, we tell the host that we do not,
+ * and immediately afterwards we send the client a hot-plug notification
+ * as a test to see if it will respond and query again.
+ * That is also the reason why capabilities are reported to the host at
+ * this place in the code rather than elsewhere.
+ * We need to report the flags location before reporting the IRQ
+ * capability.
+ */
+ hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
+ HOST_FLAGS_OFFSET);
+ if (vbox_connector->vbox_crtc->crtc_id == 0)
+ vbox_report_caps(vbox);
+ if (!vbox->initial_mode_queried) {
+ if (vbox_connector->vbox_crtc->crtc_id == 0) {
+ vbox->initial_mode_queried = true;
+ vbox_report_hotplug(vbox);
+ }
+ return drm_add_modes_noedid(connector, 800, 600);
+ }
+ num_modes = drm_add_modes_noedid(connector, 2560, 1600);
+ preferred_width = vbox_connector->mode_hint.width ?
+ vbox_connector->mode_hint.width : 1024;
+ preferred_height = vbox_connector->mode_hint.height ?
+ vbox_connector->mode_hint.height : 768;
+ mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
+ 60, false, false, false);
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ ++num_modes;
+ }
+ vbox_set_edid(connector, preferred_width, preferred_height);
+ drm_object_property_set_value(
+ &connector->base, vbox->dev->mode_config.suggested_x_property,
+ vbox_connector->vbox_crtc->x_hint);
+ drm_object_property_set_value(
+ &connector->base, vbox->dev->mode_config.suggested_y_property,
+ vbox_connector->vbox_crtc->y_hint);
+
+ return num_modes;
+}
+
+static int vbox_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void vbox_connector_destroy(struct drm_connector *connector)
+{
+ struct vbox_connector *vbox_connector;
+
+ vbox_connector = to_vbox_connector(connector);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+vbox_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct vbox_connector *vbox_connector;
+
+ vbox_connector = to_vbox_connector(connector);
+
+ return vbox_connector->mode_hint.disconnected ?
+ connector_status_disconnected : connector_status_connected;
+}
+
+static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
+ u32 max_y)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_device *dev;
+ struct drm_display_mode *mode, *iterator;
+
+ vbox_connector = to_vbox_connector(connector);
+ dev = vbox_connector->base.dev;
+ list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
+ list_del(&mode->head);
+ drm_mode_destroy(dev, mode);
+ }
+
+ return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
+}
+
+static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
+ .mode_valid = vbox_mode_valid,
+ .get_modes = vbox_get_modes,
+ .best_encoder = vbox_best_single_encoder,
+};
+
+static const struct drm_connector_funcs vbox_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = vbox_connector_detect,
+ .fill_modes = vbox_fill_modes,
+ .destroy = vbox_connector_destroy,
+};
+
+static int vbox_connector_init(struct drm_device *dev,
+ struct vbox_crtc *vbox_crtc,
+ struct drm_encoder *encoder)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_connector *connector;
+
+ vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
+ if (!vbox_connector)
+ return -ENOMEM;
+
+ connector = &vbox_connector->base;
+ vbox_connector->vbox_crtc = vbox_crtc;
+
+ drm_connector_init(dev, connector, &vbox_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_mode_create_suggested_offset_properties(dev);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, -1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, -1);
+ drm_connector_register(connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+int vbox_mode_init(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct vbox_crtc *vbox_crtc;
+ unsigned int i;
+ int ret;
+
+ /* vbox_cursor_init(dev); */
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ vbox_crtc = vbox_crtc_init(dev, i);
+ if (!vbox_crtc)
+ return -ENOMEM;
+ encoder = vbox_encoder_init(dev, i);
+ if (!encoder)
+ return -ENOMEM;
+ ret = vbox_connector_init(dev, vbox_crtc, encoder);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void vbox_mode_fini(struct drm_device *dev)
+{
+ /* vbox_cursor_fini(dev); */
+}
+
+/**
+ * Copy the ARGB image and generate the mask, which is needed in case the host
+ * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
+ * if the corresponding alpha value in the ARGB image is greater than 0xF0.
+ */
+static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
+ size_t mask_size)
+{
+ size_t line_size = (width + 7) / 8;
+ u32 i, j;
+
+ memcpy(dst + mask_size, src, width * height * 4);
+ for (i = 0; i < height; ++i)
+ for (j = 0; j < width; ++j)
+ if (((u32 *)src)[i * width + j] > 0xf0000000)
+ dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
+}
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ u32 handle, u32 width, u32 height,
+ s32 hot_x, s32 hot_y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct ttm_bo_kmap_obj uobj_map;
+ size_t data_size, mask_size;
+ struct drm_gem_object *obj;
+ u32 flags, caps = 0;
+ struct vbox_bo *bo;
+ bool src_isiomem;
+ u8 *dst = NULL;
+ u8 *src;
+ int ret;
+
+ /*
+ * Re-set this regularly, as in VirtualBox 5.0.20 and earlier the
+ * information was lost on save and restore.
+ */
+ hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
+ if (!handle) {
+ bool cursor_enabled = false;
+ struct drm_crtc *crtci;
+
+ /* Hide cursor. */
+ vbox_crtc->cursor_enabled = false;
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ if (to_vbox_crtc(crtci)->cursor_enabled)
+ cursor_enabled = true;
+ }
+
+ if (!cursor_enabled)
+ hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
+ 0, 0, NULL, 0);
+ return 0;
+ }
+
+ vbox_crtc->cursor_enabled = true;
+
+ if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
+ width == 0 || height == 0)
+ return -EINVAL;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
+ if (ret)
+ return ret;
+
+ if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
+ /*
+ * -EINVAL would mean cursor_set2() is not supported and -EAGAIN
+ * would mean retry at once, so return -EBUSY instead.
+ */
+ return -EBUSY;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ return -ENOENT;
+ }
+
+ bo = gem_to_vbox_bo(obj);
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ goto out_unref_obj;
+
+ /*
+ * The mask must be calculated based on the alpha
+ * channel, one bit per ARGB word, and must be 32-bit
+ * padded.
+ */
+ mask_size = ((width + 7) / 8 * height + 3) & ~3;
+ data_size = width * height * 4 + mask_size;
+ vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
+ vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
+ vbox->cursor_width = width;
+ vbox->cursor_height = height;
+ vbox->cursor_data_size = data_size;
+ dst = vbox->cursor_data;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+ if (ret) {
+ vbox->cursor_data_size = 0;
+ goto out_unreserve_bo;
+ }
+
+ src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+ if (src_isiomem) {
+ DRM_ERROR("src cursor bo not in main memory\n");
+ ret = -EIO;
+ goto out_unmap_bo;
+ }
+
+ copy_cursor_image(src, dst, width, height, mask_size);
+
+ flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
+ VBOX_MOUSE_POINTER_ALPHA;
+ ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+ vbox->cursor_hot_x, vbox->cursor_hot_y,
+ width, height, dst, data_size);
+out_unmap_bo:
+ ttm_bo_kunmap(&uobj_map);
+out_unreserve_bo:
+ vbox_bo_unreserve(bo);
+out_unref_obj:
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
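
Concretely, for a common 64x64 ARGB cursor: each mask line needs (64 + 7) / 8 = 8 bytes, so mask_size = (8 * 64 + 3) & ~3 = 512 bytes and data_size = 64 * 64 * 4 + 512 = 16896 bytes, with the mask placed before the pixel data in the buffer passed to hgsmi_update_pointer_shape().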
+
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
+ VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
+ s32 crtc_x =
+ vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
+ s32 crtc_y =
+ vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
+ u32 host_x, host_y;
+ u32 hot_x = 0;
+ u32 hot_y = 0;
+ int ret;
+
+ /*
+ * These are compared against unsigned values later, so bail out
+ * early rather than handle negative coordinates.
+ */
+ if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
+ return 0;
+
+ ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
+ y + crtc_y, &host_x, &host_y);
+
+ /*
+ * The only reason we have vbox_cursor_move() is that some older clients
+ * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and
+ * use DRM_MODE_CURSOR_MOVE to set the hot-spot.
+ *
+ * However, VirtualBox 5.0.20 and earlier have a bug causing them to return
+ * 0,0 as host cursor location after a save and restore.
+ *
+ * To work around this we ignore a 0, 0 return, since missing the odd
+ * time when it legitimately happens is not going to hurt much.
+ */
+ if (ret || (host_x == 0 && host_y == 0))
+ return ret;
+
+ if (x + crtc_x < host_x)
+ hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
+ if (y + crtc_y < host_y)
+ hot_y = min(host_y - y - crtc_y, vbox->cursor_height);
+
+ if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
+ return 0;
+
+ vbox->cursor_hot_x = hot_x;
+ vbox->cursor_hot_y = hot_y;
+
+ return hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+ hot_x, hot_y, vbox->cursor_width, vbox->cursor_height,
+ vbox->cursor_data, vbox->cursor_data_size);
+}
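
To illustrate the hot-spot inference above: if the guest asked for the cursor at (90, 95) on a single frame-buffer (crtc_x = crtc_y = 0) and the host reports (100, 100), then hot_x = 100 - 90 = 10 and hot_y = 100 - 95 = 5, each clamped to the cursor dimensions; the shape is only re-sent when the inferred hot-spot actually changes.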
diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/staging/vboxvideo/vbox_prime.c
new file mode 100644
index 000000000000..b7453e427a1d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_prime.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ * Copyright 2017 Canonical
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andreas Pokorny
+ */
+
+#include "vbox_drv.h"
+
+/*
+ * Based on qxl_prime.c:
+ * Empty implementations: there should not be any other driver for a virtual
+ * device that might share buffers with vboxvideo.
+ */
+
+int vbox_gem_prime_pin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
+
+void vbox_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
new file mode 100644
index 000000000000..34a905d40735
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_ttm.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>
+ */
+#include "vbox_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct vbox_private, ttm.bdev);
+}
+
+static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+/**
+ * Adds the vbox memory manager object/structures to the global memory manager.
+ */
+static int vbox_ttm_global_init(struct vbox_private *vbox)
+{
+ struct drm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &vbox->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vbox_ttm_mem_global_init;
+ global_ref->release = &vbox_ttm_mem_global_release;
+ ret = drm_global_item_ref(global_ref);
+ if (ret) {
+ DRM_ERROR("Failed setting up TTM memory subsystem.\n");
+ return ret;
+ }
+
+ vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
+ global_ref = &vbox->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+
+ ret = drm_global_item_ref(global_ref);
+ if (ret) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&vbox->ttm.mem_global_ref);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes the vbox memory manager object from the global memory manager.
+ */
+static void vbox_ttm_global_release(struct vbox_private *vbox)
+{
+ drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&vbox->ttm.mem_global_ref);
+}
+
+static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct vbox_bo *bo;
+
+ bo = container_of(tbo, struct vbox_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &vbox_bo_ttm_destroy)
+ return true;
+
+ return false;
+}
+
+static int
+vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct vbox_bo *vboxbo = vbox_bo(bo);
+
+ if (!vbox_ttm_bo_is_vbox_bo(bo))
+ return;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
+ *pl = vboxbo->placement;
+}
+
+static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ return 0;
+}
+
+static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vbox_private *vbox = vbox_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static int vbox_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+ return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+}
+
+static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func vbox_tt_backend_func = {
+ .destroy = &vbox_ttm_backend_destroy,
+};
+
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ u32 page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &vbox_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+
+ return tt;
+}
+
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver vbox_bo_driver = {
+ .ttm_tt_create = vbox_ttm_tt_create,
+ .ttm_tt_populate = vbox_ttm_tt_populate,
+ .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
+ .init_mem_type = vbox_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = vbox_bo_evict_flags,
+ .move = vbox_bo_move,
+ .verify_access = vbox_bo_verify_access,
+ .io_mem_reserve = &vbox_ttm_io_mem_reserve,
+ .io_mem_free = &vbox_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
+};
+
+int vbox_mm_init(struct vbox_private *vbox)
+{
+ int ret;
+ struct drm_device *dev = vbox->dev;
+ struct ttm_bo_device *bdev = &vbox->ttm.bdev;
+
+ ret = vbox_ttm_global_init(vbox);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&vbox->ttm.bdev,
+ vbox->ttm.bo_global_ref.ref.object,
+ &vbox_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET, true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ goto err_ttm_global_release;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ vbox->available_vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ goto err_device_release;
+ }
+
+#ifdef DRM_MTRR_WC
+ vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+#else
+ vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+#endif
+ return 0;
+
+err_device_release:
+ ttm_bo_device_release(&vbox->ttm.bdev);
+err_ttm_global_release:
+ vbox_ttm_global_release(vbox);
+ return ret;
+}
+
+void vbox_mm_fini(struct vbox_private *vbox)
+{
+#ifdef DRM_MTRR_WC
+ drm_mtrr_del(vbox->fb_mtrr,
+ pci_resource_start(vbox->dev->pdev, 0),
+ pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
+#else
+ arch_phys_wc_del(vbox->fb_mtrr);
+#endif
+ ttm_bo_device_release(&vbox->ttm.bdev);
+ vbox_ttm_global_release(vbox);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain)
+{
+ unsigned int i;
+ u32 c = 0;
+
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++].flags =
+ TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
+}
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_bo *vboxbo;
+ size_t acc_size;
+ int ret;
+
+ vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
+ if (!vboxbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &vboxbo->gem, size);
+ if (ret)
+ goto err_free_vboxbo;
+
+ vboxbo->bo.bdev = &vbox->ttm.bdev;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
+ sizeof(struct vbox_bo));
+
+ ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
+ ttm_bo_type_device, &vboxbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, NULL, vbox_bo_ttm_destroy);
+ if (ret)
+ goto err_free_vboxbo;
+
+ *pvboxbo = vboxbo;
+
+ return 0;
+
+err_free_vboxbo:
+ kfree(vboxbo);
+ return ret;
+}
+
+static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = vbox_bo_gpu_offset(bo);
+
+ return 0;
+ }
+
+ vbox_ttm_placement(bo, pl_flag);
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+
+ if (gpu_addr)
+ *gpu_addr = vbox_bo_gpu_offset(bo);
+
+ return 0;
+}
+
+int vbox_bo_unpin(struct vbox_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Move a vbox-owned buffer object to system memory if no one else has it
+ * pinned. The caller must have pinned it previously, and this call will
+ * release the caller's pin.
+ */
+int vbox_bo_push_sysram(struct vbox_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vbox_private *vbox;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return -EINVAL;
+
+ file_priv = filp->private_data;
+ vbox = file_priv->minor->dev->dev_private;
+
+ return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
+}
diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/staging/vboxvideo/vboxvideo.h
new file mode 100644
index 000000000000..d835d75d761c
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo.h
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ */
+
+#ifndef __VBOXVIDEO_H__
+#define __VBOXVIDEO_H__
+
+/*
+ * This should be in sync with monitorCount <xsd:maxInclusive value="64"/> in
+ * src/VBox/Main/xml/VirtualBox-settings-common.xsd
+ */
+#define VBOX_VIDEO_MAX_SCREENS 64
+
+/*
+ * The last 4096 bytes of the guest VRAM contains the generic info for all
+ * DualView chunks: sizes and offsets of chunks. This is filled by miniport.
+ *
+ * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info,
+ * etc. This is used exclusively by the corresponding instance of a display
+ * driver.
+ *
+ * The VRAM layout:
+ * Last 4096 bytes - Adapter information area.
+ * 4096 bytes aligned miniport heap (value specified in the config rounded up).
+ * Slack - what is left over after dividing up the VRAM.
+ * 4096 bytes aligned framebuffers:
+ * last 4096 bytes of each framebuffer is the display information area.
+ *
+ * The Virtual Graphics Adapter information in the guest VRAM is stored by the
+ * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
+ *
+ * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * the host starts to process the info. The first element at the start of
+ * the 4096 bytes region should normally be a LINK that points to the
+ * actual information chain. That way the guest driver can have a fixed
+ * layout for the information memory block and just rewrite
+ * the link to point to the relevant memory chain.
+ *
+ * The processing stops at the END element.
+ *
+ * The host can access the memory only when the port IO is processed.
+ * All data that will be needed later must be copied from these 4096 bytes.
+ * But other VRAM can be used by the host until the mode is disabled.
+ *
+ * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * to disable the mode.
+ *
+ * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
+ * from the host and issue commands to the host.
+ *
+ * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the
+ * following operations with the VBE data register can be performed:
+ *
+ * Operation Result
+ * write 16 bit value NOP
+ * read 16 bit value count of monitors
+ * write 32 bit value set the vbox cmd value and the cmd processed by the host
+ * read 32 bit value result of the last vbox command is returned
+ */
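+
+/*
+ * Illustrative sketch (not part of the interface definition): one round of
+ * the index/data port protocol described above. It assumes the
+ * VBE_DISPI_IOPORT_* and VBE_DISPI_INDEX_VBOX_VIDEO constants from
+ * vboxvideo_vbe.h and the standard Linux port accessors from <asm/io.h>:
+ *
+ *	outw(VBE_DISPI_INDEX_VBOX_VIDEO, VBE_DISPI_IOPORT_INDEX);
+ *	monitor_count = inw(VBE_DISPI_IOPORT_DATA);	// 16 bit read
+ *	outl(cmd, VBE_DISPI_IOPORT_DATA);		// 32 bit write: submit cmd
+ *	result = inl(VBE_DISPI_IOPORT_DATA);		// 32 bit read: fetch result
+ */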
+
+/**
+ * VBVA command header.
+ *
+ * @todo Where does this fit in?
+ */
+struct vbva_cmd_hdr {
+ /** Coordinates of affected rectangle. */
+ s16 x;
+ s16 y;
+ u16 w;
+ u16 h;
+} __packed;
+
+/** @name VBVA ring defines.
+ *
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts
+ * of data, for example big bitmaps which do not fit in the buffer.
+ *
+ * The guest starts writing to the buffer by initializing a record entry in
+ * the records queue. VBVA_F_RECORD_PARTIAL indicates that the record is
+ * being written. As data is written to the ring buffer, the guest increases
+ * free_offset.
+ *
+ * The host reads the records on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is
+ * present and len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE
+ * - VBVA_RING_BUFFER_THRESHOLD, the host fetches all the record data and
+ * updates data_offset. After that, on each flush the host continues
+ * fetching the data until the record is completed.
+ */
+#define VBVA_RING_BUFFER_SIZE (4194304 - 1024)
+#define VBVA_RING_BUFFER_THRESHOLD (4096)
+
+#define VBVA_MAX_RECORDS (64)
+
+#define VBVA_F_MODE_ENABLED 0x00000001u
+#define VBVA_F_MODE_VRDP 0x00000002u
+#define VBVA_F_MODE_VRDP_RESET 0x00000004u
+#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
+
+#define VBVA_F_STATE_PROCESSING 0x00010000u
+
+#define VBVA_F_RECORD_PARTIAL 0x80000000u
+
+/**
+ * VBVA record.
+ */
+struct vbva_record {
+ /** The length of the record. Changed by guest. */
+ u32 len_and_flags;
+} __packed;
+
+/*
+ * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
+ * the runtime heapsimple API. Use a minimum of 2 pages here, because the
+ * info area may also contain other data (for example the hgsmi_host_flags
+ * structure).
+ */
+#define VBVA_ADAPTER_INFORMATION_SIZE 65536
+#define VBVA_MIN_BUFFER_SIZE 65536
+
+/* The value for port IO to make the adapter stop interpreting the adapter memory. */
+#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
+
+/* The value for port IO to let the adapter interpret the adapter memory. */
+#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
+
+/* The value for port IO to let the adapter interpret the display memory.
+ * The display number is encoded in the low 16 bits.
+ */
+#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
+
+struct vbva_host_flags {
+ u32 host_events;
+ u32 supported_orders;
+} __packed;
+
+struct vbva_buffer {
+ struct vbva_host_flags host_flags;
+
+ /* The offset where the data starts in the buffer. */
+ u32 data_offset;
+ /* The offset where next data must be placed in the buffer. */
+ u32 free_offset;
+
+ /* The queue of record descriptions. */
+ struct vbva_record records[VBVA_MAX_RECORDS];
+ u32 record_first_index;
+ u32 record_free_index;
+
+ /* Space to leave free when large partial records are transferred. */
+ u32 partial_write_tresh;
+
+ u32 data_len;
+ /* variable size for the rest of the vbva_buffer area in VRAM. */
+ u8 data[0];
+} __packed;
+
+#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
+
+/* guest->host commands */
+#define VBVA_QUERY_CONF32 1
+#define VBVA_SET_CONF32 2
+#define VBVA_INFO_VIEW 3
+#define VBVA_INFO_HEAP 4
+#define VBVA_FLUSH 5
+#define VBVA_INFO_SCREEN 6
+#define VBVA_ENABLE 7
+#define VBVA_MOUSE_POINTER_SHAPE 8
+/* informs host about HGSMI caps. see vbva_caps below */
+#define VBVA_INFO_CAPS 12
+/* configures scanline, see VBVASCANLINECFG below */
+#define VBVA_SCANLINE_CFG 13
+/* requests scanline info, see VBVASCANLINEINFO below */
+#define VBVA_SCANLINE_INFO 14
+/* inform host about VBVA Command submission */
+#define VBVA_CMDVBVA_SUBMIT 16
+/* inform host about VBVA command flush */
+#define VBVA_CMDVBVA_FLUSH 17
+/* G->H DMA command */
+#define VBVA_CMDVBVA_CTL 18
+/* Query most recent mode hints sent */
+#define VBVA_QUERY_MODE_HINTS 19
+/**
+ * Report the guest virtual desktop position and size for mapping host and
+ * guest pointer positions.
+ */
+#define VBVA_REPORT_INPUT_MAPPING 20
+/** Report the guest cursor position and query the host position. */
+#define VBVA_CURSOR_POSITION 21
+
+/* host->guest commands */
+#define VBVAHG_EVENT 1
+#define VBVAHG_DISPLAY_CUSTOM 2
+
+/* vbva_conf32::index */
+#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
+#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
+/**
+ * Returns VINF_SUCCESS if the host can report mode hints via VBVA.
+ * Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
+/**
+ * Returns VINF_SUCCESS if the host can report guest cursor enabled status via
+ * VBVA. Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
+/**
+ * Returns the currently available host cursor capabilities. Available if
+ * vbva_conf32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
+ * @see VMMDevReqMouseStatus::mouseFeatures.
+ */
+#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
+/** Returns the supported flags in vbva_infoscreen::flags. */
+#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
+/** Returns the max size of VBVA record. */
+#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
+
+struct vbva_conf32 {
+ u32 index;
+ u32 value;
+} __packed;
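+
+/*
+ * Illustrative sketch (mirrors the hgsmi_query_conf() helper declared in
+ * vboxvideo_guest.h): reading one configuration value with a vbva_conf32
+ * request.  "guest_pool" is assumed to be the HGSMI guest heap set up at
+ * driver init time; example only, not compiled.
+ */
+#if 0	/* example only */
+static int example_query_conf(struct gen_pool *guest_pool, u32 index,
+			      u32 *value_ret)
+{
+	struct vbva_conf32 *p;
+
+	p = hgsmi_buffer_alloc(guest_pool, sizeof(*p), HGSMI_CH_VBVA,
+			       VBVA_QUERY_CONF32);
+	if (!p)
+		return -ENOMEM;
+
+	p->index = index;
+	p->value = U32_MAX;
+	hgsmi_buffer_submit(guest_pool, p);	/* host fills in p->value */
+	*value_ret = p->value;
+	hgsmi_buffer_free(guest_pool, p);
+
+	return 0;
+}
+#endif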
+
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0)
+/**
+ * Guest cursor capability: can the host show a hardware cursor at the host
+ * pointer location?
+ */
+#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2)
+/** Reserved for historical reasons. Must always be unset. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5)
+
+struct vbva_infoview {
+ /* Index of the screen, assigned by the guest. */
+ u32 view_index;
+
+ /* The screen offset in VRAM, the framebuffer starts here. */
+ u32 view_offset;
+
+ /* The size of the VRAM memory that can be used for the view. */
+ u32 view_size;
+
+ /* The recommended maximum size of the VRAM memory for the screen. */
+ u32 max_screen_size;
+} __packed;
+
+struct vbva_flush {
+ u32 reserved;
+} __packed;
+
+/* vbva_infoscreen::flags */
+#define VBVA_SCREEN_F_NONE 0x0000
+#define VBVA_SCREEN_F_ACTIVE 0x0001
+/**
+ * The virtual monitor has been disabled by the guest and should be removed
+ * by the host and ignored for purposes of pointer position calculation.
+ */
+#define VBVA_SCREEN_F_DISABLED 0x0002
+/**
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using width, height, etc values from the vbva_infoscreen
+ * request.
+ */
+#define VBVA_SCREEN_F_BLANK 0x0004
+/**
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using the previous mode values for width, height, etc.
+ */
+#define VBVA_SCREEN_F_BLANK2 0x0008
+
+struct vbva_infoscreen {
+ /* Which view contains the screen. */
+ u32 view_index;
+
+ /* Physical X origin relative to the primary screen. */
+ s32 origin_x;
+
+ /* Physical Y origin relative to the primary screen. */
+ s32 origin_y;
+
+ /* Offset of visible framebuffer relative to the framebuffer start. */
+ u32 start_offset;
+
+ /* The scan line size in bytes. */
+ u32 line_size;
+
+ /* Width of the screen. */
+ u32 width;
+
+ /* Height of the screen. */
+ u32 height;
+
+ /* Color depth. */
+ u16 bits_per_pixel;
+
+ /* VBVA_SCREEN_F_* */
+ u16 flags;
+} __packed;
+
+/* vbva_enable::flags */
+#define VBVA_F_NONE 0x00000000
+#define VBVA_F_ENABLE 0x00000001
+#define VBVA_F_DISABLE 0x00000002
+/* extended VBVA to be used with WDDM */
+#define VBVA_F_EXTENDED 0x00000004
+/* vbva offset is absolute VRAM offset */
+#define VBVA_F_ABSOFFSET 0x00000008
+
+struct vbva_enable {
+ u32 flags;
+ u32 offset;
+ s32 result;
+} __packed;
+
+struct vbva_enable_ex {
+ struct vbva_enable base;
+ u32 screen_id;
+} __packed;
+
+struct vbva_mouse_pointer_shape {
+ /* The host result. */
+ s32 result;
+
+ /* VBOX_MOUSE_POINTER_* bit flags. */
+ u32 flags;
+
+ /* X coordinate of the hot spot. */
+ u32 hot_x;
+
+ /* Y coordinate of the hot spot. */
+ u32 hot_y;
+
+ /* Width of the pointer in pixels. */
+ u32 width;
+
+ /* Height of the pointer in scanlines. */
+ u32 height;
+
+ /* Pointer data.
+ *
+ ****
+ * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
+ * mask.
+ *
+ * For pointers without alpha channel the XOR mask pixels are 32 bit
+ * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask
+ * consists of (lsb)BGRA(msb) 32 bit values.
+ *
+ * The guest driver must create the AND mask for pointers with an alpha
+ * channel, so that if the host does not support alpha the pointer can be
+ * displayed as a normal color pointer. The AND mask can be constructed
+ * from the alpha values: for example, an alpha value >= 0xf0 means bit 0
+ * in the AND mask.
+ *
+ * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
+ * mask, therefore, is and_len = (width + 7) / 8 * height. The padding
+ * bits at the end of any scanline are undefined.
+ *
+ * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
+ * u8 *xor = and + ((and_len + 3) & ~3)
+ * Bytes in the gap between the AND and the XOR mask are undefined.
+ * XOR mask scanlines have no gap between them and size of XOR mask is:
+ * xor_len = width * 4 * height.
+ ****
+ *
+ * Preallocate 4 bytes for accessing actual data as p->data.
+ */
+ u8 data[4];
+} __packed;
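+
+/*
+ * Illustrative sketch: computing the size of the shape data following the
+ * AND/XOR mask layout described above.  This helper is an example, not
+ * part of the interface, and is not compiled.
+ */
+#if 0	/* example only */
+static inline u32 example_pointer_data_size(u32 width, u32 height)
+{
+	u32 and_len = ((width + 7) / 8) * height;	/* 1 bpp AND mask */
+	u32 xor_off = (and_len + 3) & ~3u;	/* XOR mask, 4 byte aligned */
+
+	return xor_off + width * 4 * height;	/* plus 32 bpp XOR mask */
+}
+#endif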
+
+/**
+ * @name vbva_mouse_pointer_shape::flags
+ * @note The VBOX_MOUSE_POINTER_* flags are used in the guest video driver,
+ * values must be <= 0x8000 and must not be changed. (try make more sense
+ * of this, please).
+ * @{
+ */
+
+/** pointer is visible */
+#define VBOX_MOUSE_POINTER_VISIBLE 0x0001
+/** pointer has alpha channel */
+#define VBOX_MOUSE_POINTER_ALPHA 0x0002
+/** pointerData contains new pointer shape */
+#define VBOX_MOUSE_POINTER_SHAPE 0x0004
+
+/** @} */
+
+/*
+ * The guest driver can handle asynchronous guest command completion by
+ * reading the command offset from an I/O port.
+ */
+#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
+/* the guest driver can handle video adapter IRQs */
+#define VBVACAPS_IRQ 0x00000002
+/** The guest can read video mode hints sent via VBVA. */
+#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
+/** The guest can switch to a software cursor on demand. */
+#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
+/** The guest does not depend on host handling the VBE registers. */
+#define VBVACAPS_USE_VBVA_ONLY 0x00000010
+
+struct vbva_caps {
+ s32 rc;
+ u32 caps;
+} __packed;
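+
+/*
+ * Illustrative sketch (mirrors hgsmi_send_caps_info() declared in
+ * vboxvideo_guest.h): reporting guest capabilities with a vbva_caps
+ * request.  "guest_pool" is an assumed HGSMI guest heap; example only.
+ */
+#if 0	/* example only */
+	struct vbva_caps *p;
+
+	p = hgsmi_buffer_alloc(guest_pool, sizeof(*p), HGSMI_CH_VBVA,
+			       VBVA_INFO_CAPS);
+	if (!p)
+		return -ENOMEM;
+
+	p->rc = VERR_NOT_SUPPORTED;	/* host overwrites on success */
+	p->caps = VBVACAPS_VIDEO_MODE_HINTS | VBVACAPS_IRQ;
+	hgsmi_buffer_submit(guest_pool, p);
+	hgsmi_buffer_free(guest_pool, p);
+#endif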
+
+/** Query the most recent mode hints received from the host. */
+struct vbva_query_mode_hints {
+ /** The maximum number of screens to return hints for. */
+ u16 hints_queried_count;
+ /** The size of the mode hint structures directly following this one. */
+ u16 hint_structure_guest_size;
+ /** Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
+ s32 rc;
+} __packed;
+
+/**
+ * Structure in which a mode hint is returned. The guest allocates an array
+ * of these immediately after the vbva_query_mode_hints structure.
+ * To accommodate future extensions, the vbva_query_mode_hints structure
+ * specifies the size of the vbva_modehint structures allocated by the guest,
+ * and the host only fills out structure elements which fit into that size. The
+ * host should fill any unused members (e.g. dx, dy) or structure space on the
+ * end with ~0. The whole structure can legally be set to ~0 to skip a screen.
+ */
+struct vbva_modehint {
+ u32 magic;
+ u32 cx;
+ u32 cy;
+ u32 bpp; /* Which has never been used... */
+ u32 display;
+ u32 dx; /**< X offset into the virtual frame-buffer. */
+ u32 dy; /**< Y offset into the virtual frame-buffer. */
+ u32 enabled; /* Not flags. Add new members for new flags. */
+} __packed;
+
+#define VBVAMODEHINT_MAGIC 0x0801add9u
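+
+/*
+ * Illustrative sketch (mirrors hgsmi_get_mode_hints() declared in
+ * vboxvideo_guest.h): querying mode hints for "screens" displays.  The
+ * host fills the vbva_modehint array placed directly after the
+ * vbva_query_mode_hints header; "guest_pool", "screens" and "hints" are
+ * assumed locals.  Example only, not compiled.
+ */
+#if 0	/* example only */
+	struct vbva_query_mode_hints *p;
+	size_t size = sizeof(*p) + screens * sizeof(struct vbva_modehint);
+
+	p = hgsmi_buffer_alloc(guest_pool, size, HGSMI_CH_VBVA,
+			       VBVA_QUERY_MODE_HINTS);
+	if (!p)
+		return -ENOMEM;
+
+	p->hints_queried_count = screens;
+	p->hint_structure_guest_size = sizeof(struct vbva_modehint);
+	p->rc = VERR_NOT_SUPPORTED;	/* host sets VINF_SUCCESS on success */
+	hgsmi_buffer_submit(guest_pool, p);
+	memcpy(hints, p + 1, screens * sizeof(struct vbva_modehint));
+	hgsmi_buffer_free(guest_pool, p);
+#endif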
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens and must be re-set.
+ * @see VBVA_REPORT_INPUT_MAPPING.
+ */
+struct vbva_report_input_mapping {
+ s32 x; /**< Upper left X co-ordinate relative to the first screen. */
+ s32 y; /**< Upper left Y co-ordinate relative to the first screen. */
+ u32 cx; /**< Rectangle width. */
+ u32 cy; /**< Rectangle height. */
+} __packed;
+
+/**
+ * Report the guest cursor position and query the host one. The host may wish
+ * to use the guest information to re-position its own cursor (though this is
+ * currently unlikely).
+ * @see VBVA_CURSOR_POSITION
+ */
+struct vbva_cursor_position {
+ u32 report_position; /**< Are we reporting a position? */
+ u32 x; /**< Guest cursor X position */
+ u32 y; /**< Guest cursor Y position */
+} __packed;
+
+#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/staging/vboxvideo/vboxvideo_guest.h
new file mode 100644
index 000000000000..d09da841711a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_guest.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOXVIDEO_GUEST_H__
+#define __VBOXVIDEO_GUEST_H__
+
+#include <linux/genalloc.h>
+#include "vboxvideo.h"
+
+/**
+ * Structure grouping the context needed for sending graphics acceleration
+ * information to the host via VBVA. Each screen has its own VBVA buffer.
+ */
+struct vbva_buf_ctx {
+ /** Offset of the buffer in the VRAM section for the screen */
+ u32 buffer_offset;
+ /** Length of the buffer in bytes */
+ u32 buffer_length;
+ /** Set if we wrote to the buffer faster than the host could read it */
+ bool buffer_overflow;
+ /** VBVA record that we are currently preparing for the host, or NULL */
+ struct vbva_record *record;
+ /**
+ * Pointer to the VBVA buffer mapped into the current address space.
+ * Will be NULL if VBVA is not enabled.
+ */
+ struct vbva_buffer *vbva;
+};
+
+/**
+ * @name Base HGSMI APIs
+ * @{
+ */
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
+int hgsmi_test_query_conf(struct gen_pool *ctx);
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+ u32 hot_x, u32 hot_y, u32 width, u32 height,
+ u8 *pixels, u32 len);
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+ u32 x, u32 y, u32 *x_host, u32 *y_host);
+/** @} */
+
+/**
+ * @name VBVA APIs
+ * @{
+ */
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ struct vbva_buffer *vbva, s32 screen);
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ s32 screen);
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx);
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ const void *p, u32 len);
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+ u32 buffer_offset, u32 buffer_length);
+/** @} */
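+
+/*
+ * Illustrative usage sketch for the VBVA APIs above: sending one command
+ * header to the host inside a single update.  Assumes "vbva_ctx" and the
+ * HGSMI guest pool "guest_pool" were set up at init time and that VBVA was
+ * enabled with vbva_enable().  Example only, not compiled.
+ */
+#if 0	/* example only */
+	if (vbva_buffer_begin_update(vbva_ctx, guest_pool)) {
+		struct vbva_cmd_hdr hdr = {
+			.x = 0, .y = 0, .w = 64, .h = 64,
+		};
+
+		vbva_write(vbva_ctx, guest_pool, &hdr, sizeof(hdr));
+		vbva_buffer_end_update(vbva_ctx);
+	}
+#endif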
+
+/**
+ * @name Modesetting APIs
+ * @{
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+ s32 origin_x, s32 origin_y, u32 start_offset,
+ u32 pitch, u32 width, u32 height,
+ u16 bpp, u16 flags);
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+ u32 width, u32 height);
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+ struct vbva_modehint *hints);
+/** @} */
+
+#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/staging/vboxvideo/vboxvideo_vbe.h
new file mode 100644
index 000000000000..f842f4d9c80a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_vbe.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOXVIDEO_VBE_H__
+#define __VBOXVIDEO_VBE_H__
+
+/* GUEST <-> HOST Communication API */
+
+/**
+ * @todo FIXME: Either dynamically ask the host for this or put it somewhere
+ * high in physical memory like 0xE0000000.
+ */
+
+#define VBE_DISPI_BANK_ADDRESS 0xA0000
+#define VBE_DISPI_BANK_SIZE_KB 64
+
+#define VBE_DISPI_MAX_XRES 16384
+#define VBE_DISPI_MAX_YRES 16384
+#define VBE_DISPI_MAX_BPP 32
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
+#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
+#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+
+#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
+/* The VBOX interface id. Indicates support for the VBVA shared memory interface. */
+#define VBE_DISPI_ID_HGSMI 0xBE01
+#define VBE_DISPI_ID_ANYX 0xBE02
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+/**
+ * @note This definition is a BOCHS legacy, used only in the video BIOS
+ * code and ignored by the emulated hardware.
+ */
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
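+
+/*
+ * Illustrative sketch: programming a 1024x768x32 linear framebuffer mode
+ * through the DISPI index/data port pair.  outw(value, port) is the Linux
+ * port accessor from <asm/io.h>; example only, not driver code.
+ */
+#if 0	/* example only */
+	outw(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_IOPORT_INDEX);
+	outw(VBE_DISPI_DISABLED, VBE_DISPI_IOPORT_DATA);
+	outw(VBE_DISPI_INDEX_XRES, VBE_DISPI_IOPORT_INDEX);
+	outw(1024, VBE_DISPI_IOPORT_DATA);
+	outw(VBE_DISPI_INDEX_YRES, VBE_DISPI_IOPORT_INDEX);
+	outw(768, VBE_DISPI_IOPORT_DATA);
+	outw(VBE_DISPI_INDEX_BPP, VBE_DISPI_IOPORT_INDEX);
+	outw(VBE_DISPI_MAX_BPP, VBE_DISPI_IOPORT_DATA);
+	outw(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_IOPORT_INDEX);
+	outw(VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED, VBE_DISPI_IOPORT_DATA);
+#endif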
+
+#define VGA_PORT_HGSMI_HOST 0x3b0
+#define VGA_PORT_HGSMI_GUEST 0x3d0
+
+#endif
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/staging/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..c10c782f94e1
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbva_base.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "hgsmi_channels.h"
+
+/*
+ * There is a hardware ring buffer in the graphics device video RAM, formerly
+ * in the VBox VMMDev PCI memory space.
+ * All graphics commands go there, serialized by vbva_buffer_begin_update
+ * and vbva_buffer_end_update.
+ *
+ * free_offset is the writing position and data_offset is the reading
+ * position. free_offset == data_offset means the buffer is empty.
+ * There must always be a gap between data_offset and free_offset when
+ * data is in the buffer.
+ * The guest only changes free_offset, the host only changes data_offset.
+ */
+
+static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
+{
+ s32 diff = vbva->data_offset - vbva->free_offset;
+
+ return diff > 0 ? diff : vbva->data_len + diff;
+}
+
+static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
+ const void *p, u32 len, u32 offset)
+{
+ struct vbva_buffer *vbva = vbva_ctx->vbva;
+ u32 bytes_till_boundary = vbva->data_len - offset;
+ u8 *dst = &vbva->data[offset];
+ s32 diff = len - bytes_till_boundary;
+
+ if (diff <= 0) {
+ /* Chunk will not cross buffer boundary. */
+ memcpy(dst, p, len);
+ } else {
+ /* Chunk crosses buffer boundary. */
+ memcpy(dst, p, bytes_till_boundary);
+ memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
+ }
+}
+
+static void vbva_buffer_flush(struct gen_pool *ctx)
+{
+ struct vbva_flush *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
+ if (!p)
+ return;
+
+ p->reserved = 0;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+}
+
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ const void *p, u32 len)
+{
+ struct vbva_record *record;
+ struct vbva_buffer *vbva;
+ u32 available;
+
+ vbva = vbva_ctx->vbva;
+ record = vbva_ctx->record;
+
+ if (!vbva || vbva_ctx->buffer_overflow ||
+ !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
+ return false;
+
+ available = vbva_buffer_available(vbva);
+
+ while (len > 0) {
+ u32 chunk = len;
+
+ if (chunk >= available) {
+ vbva_buffer_flush(ctx);
+ available = vbva_buffer_available(vbva);
+ }
+
+ if (chunk >= available) {
+ if (WARN_ON(available <= vbva->partial_write_tresh)) {
+ vbva_ctx->buffer_overflow = true;
+ return false;
+ }
+ chunk = available - vbva->partial_write_tresh;
+ }
+
+ vbva_buffer_place_data_at(vbva_ctx, p, chunk,
+ vbva->free_offset);
+
+ vbva->free_offset = (vbva->free_offset + chunk) %
+ vbva->data_len;
+ record->len_and_flags += chunk;
+ available -= chunk;
+ len -= chunk;
+ p += chunk;
+ }
+
+ return true;
+}
+
+static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx, s32 screen, bool enable)
+{
+ struct vbva_enable_ex *p;
+ bool ret;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
+ if (!p)
+ return false;
+
+ p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
+ p->base.offset = vbva_ctx->buffer_offset;
+ p->base.result = VERR_NOT_SUPPORTED;
+ if (screen >= 0) {
+ p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
+ p->screen_id = screen;
+ }
+
+ hgsmi_buffer_submit(ctx, p);
+
+ if (enable)
+ ret = RT_SUCCESS(p->base.result);
+ else
+ ret = true;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return ret;
+}
+
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ struct vbva_buffer *vbva, s32 screen)
+{
+ bool ret = false;
+
+ memset(vbva, 0, sizeof(*vbva));
+ vbva->partial_write_tresh = 256;
+ vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
+ vbva_ctx->vbva = vbva;
+
+ ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
+ if (!ret)
+ vbva_disable(vbva_ctx, ctx, screen);
+
+ return ret;
+}
+
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ s32 screen)
+{
+ vbva_ctx->buffer_overflow = false;
+ vbva_ctx->record = NULL;
+ vbva_ctx->vbva = NULL;
+
+ vbva_inform_host(vbva_ctx, ctx, screen, false);
+}
+
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx)
+{
+ struct vbva_record *record;
+ u32 next;
+
+ if (!vbva_ctx->vbva ||
+ !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
+ return false;
+
+ WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
+
+ next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
+
+ /* Flush if all slots in the records queue are used */
+ if (next == vbva_ctx->vbva->record_first_index)
+ vbva_buffer_flush(ctx);
+
+ /* If there is still no room even after the flush, fail the request */
+ if (next == vbva_ctx->vbva->record_first_index)
+ return false;
+
+ record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
+ record->len_and_flags = VBVA_F_RECORD_PARTIAL;
+ vbva_ctx->vbva->record_free_index = next;
+ /* Remember which record we are using. */
+ vbva_ctx->record = record;
+
+ return true;
+}
+
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
+{
+ struct vbva_record *record = vbva_ctx->record;
+
+ WARN_ON(!vbva_ctx->vbva || !record ||
+ !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
+
+ /* Mark the record completed. */
+ record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
+
+ vbva_ctx->buffer_overflow = false;
+ vbva_ctx->record = NULL;
+}
+
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+ u32 buffer_offset, u32 buffer_length)
+{
+ vbva_ctx->buffer_offset = buffer_offset;
+ vbva_ctx->buffer_length = buffer_length;
+}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 030bec855d86..314ffac50bb8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3391,7 +3391,6 @@ static int vchiq_probe(struct platform_device *pdev)
struct device_node *fw_node;
struct rpi_firmware *fw;
int err;
- void *ptr_err;
fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
if (!fw_node) {
@@ -3427,14 +3426,14 @@ static int vchiq_probe(struct platform_device *pdev)
/* create sysfs entries */
vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
- ptr_err = vchiq_class;
- if (IS_ERR(ptr_err))
+ err = PTR_ERR(vchiq_class);
+ if (IS_ERR(vchiq_class))
goto failed_class_create;
vchiq_dev = device_create(vchiq_class, NULL,
vchiq_devid, NULL, "vchiq");
- ptr_err = vchiq_dev;
- if (IS_ERR(ptr_err))
+ err = PTR_ERR(vchiq_dev);
+ if (IS_ERR(vchiq_dev))
goto failed_device_create;
/* create debugfs entries */
@@ -3455,7 +3454,6 @@ failed_device_create:
class_destroy(vchiq_class);
failed_class_create:
cdev_del(&vchiq_cdev);
- err = PTR_ERR(ptr_err);
failed_cdev_add:
unregister_chrdev_region(vchiq_devid, 1);
failed_platform_init:
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ab3e8f410444..e9391bbd4036 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida);
struct nvm_auth_status {
struct list_head list;
- uuid_be uuid;
+ uuid_t uuid;
u32 status;
};
@@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
struct nvm_auth_status *st;
list_for_each_entry(st, &nvm_auth_status_cache, list) {
- if (!uuid_be_cmp(st->uuid, *sw->uuid))
+ if (uuid_equal(&st->uuid, sw->uuid))
return st;
}
@@ -281,9 +281,11 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
if (active) {
config.name = "nvm_active";
config.reg_read = tb_switch_nvm_read;
+ config.read_only = true;
} else {
config.name = "nvm_non_active";
config.reg_write = tb_switch_nvm_write;
+ config.root_only = true;
}
config.id = id;
@@ -292,7 +294,6 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
config.size = size;
config.dev = &sw->dev;
config.owner = THIS_MODULE;
- config.root_only = true;
config.priv = sw;
return nvmem_register(&config);
@@ -1460,7 +1461,7 @@ struct tb_sw_lookup {
struct tb *tb;
u8 link;
u8 depth;
- const uuid_be *uuid;
+ const uuid_t *uuid;
};
static int tb_switch_match(struct device *dev, void *data)
@@ -1517,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
*/
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
struct tb_sw_lookup lookup;
struct device *dev;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3d9f64676e58..e0deee4f1eb0 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -101,7 +101,7 @@ struct tb_switch {
struct tb_dma_port *dma_port;
struct tb *tb;
u64 uid;
- uuid_be *uuid;
+ uuid_t *uuid;
u16 vendor;
u16 device;
const char *vendor_name;
@@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 85b6d33c0919..de6441e4a060 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response {
struct icm_fr_event_device_connected {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 link_info;
@@ -193,7 +193,7 @@ struct icm_fr_event_device_connected {
struct icm_fr_pkg_approve_device {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected {
struct icm_fr_pkg_add_device_key {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key {
struct icm_fr_pkg_add_device_key_response {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response {
struct icm_fr_pkg_challenge_device {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device {
struct icm_fr_pkg_challenge_device_response {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index d1399aac05a1..284749fb0f6b 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -448,48 +448,6 @@ err:
return retval;
}
-/**
- * pty_open_peer - open the peer of a pty
- * @tty: the peer of the pty being opened
- *
- * Open the cached dentry in tty->link, providing a safe way for userspace
- * to get the slave end of a pty (where they have the master fd and cannot
- * access or trust the mount namespace /dev/pts was mounted inside).
- */
-static struct file *pty_open_peer(struct tty_struct *tty, int flags)
-{
- if (tty->driver->subtype != PTY_TYPE_MASTER)
- return ERR_PTR(-EIO);
- return dentry_open(tty->link->driver_data, flags, current_cred());
-}
-
-static int pty_get_peer(struct tty_struct *tty, int flags)
-{
- int fd = -1;
- struct file *filp = NULL;
- int retval = -EINVAL;
-
- fd = get_unused_fd_flags(0);
- if (fd < 0) {
- retval = fd;
- goto err;
- }
-
- filp = pty_open_peer(tty, flags);
- if (IS_ERR(filp)) {
- retval = PTR_ERR(filp);
- goto err_put;
- }
-
- fd_install(fd, filp);
- return fd;
-
-err_put:
- put_unused_fd(fd);
-err:
- return retval;
-}
-
static void pty_cleanup(struct tty_struct *tty)
{
tty_port_put(tty->port);
@@ -646,9 +604,50 @@ static inline void legacy_pty_init(void) { }
/* Unix98 devices */
#ifdef CONFIG_UNIX98_PTYS
-
static struct cdev ptmx_cdev;
+/**
+ * pty_open_peer - open the peer of a pty
+ * @tty: the peer of the pty being opened
+ *
+ * Open the cached dentry in tty->link, providing a safe way for userspace
+ * to get the slave end of a pty (where they have the master fd and cannot
+ * access or trust the mount namespace /dev/pts was mounted inside).
+ */
+static struct file *pty_open_peer(struct tty_struct *tty, int flags)
+{
+ if (tty->driver->subtype != PTY_TYPE_MASTER)
+ return ERR_PTR(-EIO);
+ return dentry_open(tty->link->driver_data, flags, current_cred());
+}
+
+static int pty_get_peer(struct tty_struct *tty, int flags)
+{
+ int fd = -1;
+ struct file *filp = NULL;
+ int retval = -EINVAL;
+
+ fd = get_unused_fd_flags(0);
+ if (fd < 0) {
+ retval = fd;
+ goto err;
+ }
+
+ filp = pty_open_peer(tty, flags);
+ if (IS_ERR(filp)) {
+ retval = PTR_ERR(filp);
+ goto err_put;
+ }
+
+ fd_install(fd, filp);
+ return fd;
+
+err_put:
+ put_unused_fd(fd);
+err:
+ return retval;
+}
+
static int pty_unix98_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 343de8c384b0..898dcb091a27 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -619,6 +619,12 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
TIOCSER_TEMT : 0;
}
+static bool lpuart_is_32(struct lpuart_port *sport)
+{
+ return sport->port.iotype == UPIO_MEM32 ||
+ sport->port.iotype == UPIO_MEM32BE;
+}
+
static irqreturn_t lpuart_txint(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
@@ -627,7 +633,7 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
spin_lock_irqsave(&sport->port.lock, flags);
if (sport->port.x_char) {
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
else
writeb(sport->port.x_char, sport->port.membase + UARTDR);
@@ -635,14 +641,14 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_stop_tx(&sport->port);
else
lpuart_stop_tx(&sport->port);
goto out;
}
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_transmit_buffer(sport);
else
lpuart_transmit_buffer(sport);
@@ -1978,12 +1984,12 @@ static int __init lpuart_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_console_get_options(sport, &baud, &parity, &bits);
else
lpuart_console_get_options(sport, &baud, &parity, &bits);
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_setup_watermark(sport);
else
lpuart_setup_watermark(sport);
@@ -2118,7 +2124,7 @@ static int lpuart_probe(struct platform_device *pdev)
}
sport->port.irq = ret;
sport->port.iotype = sdata->iotype;
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
sport->port.ops = &lpuart32_pops;
else
sport->port.ops = &lpuart_pops;
@@ -2145,7 +2151,7 @@ static int lpuart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &sport->port);
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart_reg.cons = LPUART32_CONSOLE;
else
lpuart_reg.cons = LPUART_CONSOLE;
@@ -2198,7 +2204,7 @@ static int lpuart_suspend(struct device *dev)
struct lpuart_port *sport = dev_get_drvdata(dev);
unsigned long temp;
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
+ if (lpuart_is_32(sport)) {
/* disable Rx/Tx and interrupts */
temp = lpuart32_read(&sport->port, UARTCTRL);
temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
@@ -2249,7 +2255,7 @@ static int lpuart_resume(struct device *dev)
if (sport->port.suspended && !sport->port.irq_wake)
clk_prepare_enable(sport->clk);
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
+ if (lpuart_is_32(sport)) {
lpuart32_setup_watermark(sport);
temp = lpuart32_read(&sport->port, UARTCTRL);
temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9e3162bf3bd1..80934e7bd67f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -186,11 +186,6 @@
#define UART_NR 8
-/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
-#define RX_BUF_SIZE (PAGE_SIZE)
-
-
/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
IMX1_UART,
@@ -226,7 +221,6 @@ struct imx_port {
struct dma_chan *dma_chan_rx, *dma_chan_tx;
struct scatterlist rx_sgl, tx_sgl[2];
void *rx_buf;
- unsigned int rx_buf_size;
struct circ_buf rx_ring;
unsigned int rx_periods;
dma_cookie_t rx_cookie;
@@ -464,7 +458,7 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
}
}
- while (!uart_circ_empty(xmit) &&
+ while (!uart_circ_empty(xmit) && !sport->dma_is_txing &&
!(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
/* send xmit->buf[xmit->tail]
* out the port here */
@@ -967,6 +961,8 @@ static void imx_timeout(unsigned long data)
}
}
+#define RX_BUF_SIZE (PAGE_SIZE)
+
/*
* There are two kinds of RX DMA interrupts(such as in the MX6Q):
* [1] the RX DMA buffer is full.
@@ -1049,6 +1045,9 @@ static void dma_rx_callback(void *data)
}
}
+/* RX DMA buffer periods */
+#define RX_DMA_PERIODS 4
+
static int start_rx_dma(struct imx_port *sport)
{
struct scatterlist *sgl = &sport->rx_sgl;
@@ -1059,8 +1058,9 @@ static int start_rx_dma(struct imx_port *sport)
sport->rx_ring.head = 0;
sport->rx_ring.tail = 0;
+ sport->rx_periods = RX_DMA_PERIODS;
- sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
+ sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
if (ret == 0) {
dev_err(dev, "DMA mapping error for RX.\n");
@@ -1171,7 +1171,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
goto err;
}
- sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
+ sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sport->rx_buf) {
ret = -ENOMEM;
goto err;
@@ -2036,7 +2036,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
{
struct device_node *np = pdev->dev.of_node;
int ret;
- u32 dma_buf_size[2];
sport->devdata = of_device_get_match_data(&pdev->dev);
if (!sport->devdata)
@@ -2060,14 +2059,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
if (of_get_property(np, "rts-gpios", NULL))
sport->have_rtsgpio = 1;
- if (!of_property_read_u32_array(np, "fsl,dma-size", dma_buf_size, 2)) {
- sport->rx_buf_size = dma_buf_size[0] * dma_buf_size[1];
- sport->rx_periods = dma_buf_size[1];
- } else {
- sport->rx_buf_size = RX_BUF_SIZE;
- sport->rx_periods = RX_DMA_PERIODS;
- }
-
return 0;
}
#else
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index da5ddfc14778..e08b16b070c0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1085,10 +1085,12 @@ static ssize_t rx_trigger_store(struct device *dev,
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
+ int ret;
long r;
- if (kstrtol(buf, 0, &r) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &r);
+ if (ret)
+ return ret;
sci->rx_trigger = scif_set_rtrg(port, r);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
@@ -1116,10 +1118,12 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
+ int ret;
long r;
- if (kstrtol(buf, 0, &r) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &r);
+ if (ret)
+ return ret;
sci->rx_fifo_timeout = r;
scif_set_rtrg(port, 1);
if (r > 0)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index f5335be344f6..6b0ca65027d0 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -758,6 +758,7 @@ static int asc_init_port(struct asc_port *ascport,
if (IS_ERR(ascport->pinctrl)) {
ret = PTR_ERR(ascport->pinctrl);
dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret);
+ return ret;
}
ascport->states[DEFAULT] =
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5357d83bbda2..5e056064259c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1829,6 +1829,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index bc3b3fda5000..c4066cd77e47 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3573,6 +3573,9 @@ irq_retry:
/* Report disconnection if it is not already done. */
dwc2_hsotg_disconnect(hsotg);
+ /* Reset device address to zero */
+ __bic32(hsotg->regs + DCFG, DCFG_DEVADDR_MASK);
+
if (usb_status & GOTGCTL_BSESVLD && connected)
dwc2_hsotg_core_init_disconnected(hsotg, true);
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 326b302fc440..03474d3575ab 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -766,15 +766,15 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->maximum_speed = USB_SPEED_HIGH;
}
- ret = dwc3_core_soft_reset(dwc);
+ ret = dwc3_core_get_phy(dwc);
if (ret)
goto err0;
- ret = dwc3_phy_setup(dwc);
+ ret = dwc3_core_soft_reset(dwc);
if (ret)
goto err0;
- ret = dwc3_core_get_phy(dwc);
+ ret = dwc3_phy_setup(dwc);
if (ret)
goto err0;
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 98926504b55b..f5aaa0cf3873 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -512,15 +512,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
/* check the DMA Status */
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
- irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
- dwc3_omap_interrupt_thread, IRQF_SHARED,
- "dwc3-omap", omap);
- if (ret) {
- dev_err(dev, "failed to request IRQ #%d --> %d\n",
- omap->irq, ret);
- goto err1;
- }
ret = dwc3_omap_extcon_register(omap);
if (ret < 0)
@@ -532,8 +523,15 @@ static int dwc3_omap_probe(struct platform_device *pdev)
goto err1;
}
+ ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
+ dwc3_omap_interrupt_thread, IRQF_SHARED,
+ "dwc3-omap", omap);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ omap->irq, ret);
+ goto err1;
+ }
dwc3_omap_enable_irqs(omap);
- enable_irq(omap->irq);
return 0;
err1:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9e41605a276b..6b299c7b7656 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -191,14 +191,16 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
req->started = false;
list_del(&req->list);
- req->trb = NULL;
req->remaining = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
+ if (req->trb)
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+
+ req->trb = NULL;
trace_dwc3_gadget_giveback(req);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index e80b9c123a9d..f95bddd6513f 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2490,7 +2490,7 @@ static int fsg_main_thread(void *common_)
int i;
down_write(&common->filesem);
- for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
+ for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
struct fsg_lun *curlun = common->luns[i];
if (!curlun || !fsg_lun_is_open(curlun))
continue;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 8656f84e17d9..29efbedc91f9 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -92,9 +92,9 @@ static struct uac_input_terminal_descriptor usb_out_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
.bTerminalID = USB_OUT_IT_ID,
- .wTerminalType = UAC_TERMINAL_STREAMING,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .wChannelConfig = 0x3,
+ .wChannelConfig = cpu_to_le16(0x3),
};
#define IO_OUT_OT_ID 2
@@ -103,7 +103,7 @@ static struct uac1_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
.bTerminalID = IO_OUT_OT_ID,
- .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
.bSourceID = USB_OUT_IT_ID,
};
@@ -114,9 +114,9 @@ static struct uac_input_terminal_descriptor io_in_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
.bTerminalID = IO_IN_IT_ID,
- .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE,
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
- .wChannelConfig = 0x3,
+ .wChannelConfig = cpu_to_le16(0x3),
};
#define USB_IN_OT_ID 4
@@ -125,7 +125,7 @@ static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
.bTerminalID = USB_IN_OT_ID,
- .wTerminalType = UAC_TERMINAL_STREAMING,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
.bSourceID = IO_IN_IT_ID,
};
@@ -174,7 +174,7 @@ static struct uac1_as_header_descriptor as_out_header_desc = {
.bDescriptorSubtype = UAC_AS_GENERAL,
.bTerminalLink = USB_OUT_IT_ID,
.bDelay = 1,
- .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
static struct uac1_as_header_descriptor as_in_header_desc = {
@@ -183,7 +183,7 @@ static struct uac1_as_header_descriptor as_in_header_desc = {
.bDescriptorSubtype = UAC_AS_GENERAL,
.bTerminalLink = USB_IN_OT_ID,
.bDelay = 1,
- .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
@@ -606,8 +606,8 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
if (status)
goto fail;
- audio->out_ep_maxpsize = as_out_ep_desc.wMaxPacketSize;
- audio->in_ep_maxpsize = as_in_ep_desc.wMaxPacketSize;
+ audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize);
+ audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize);
audio->params.c_chmask = audio_opts->c_chmask;
audio->params.c_srate = audio_opts->c_srate;
audio->params.c_ssize = audio_opts->c_ssize;
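
USB descriptors are little-endian on the wire, so every multi-byte field must be stored with cpu_to_le16() and read back with le16_to_cpu(); on little-endian hosts the old code happened to work, which is how bugs like these survive. A self-contained sketch of the convention, using a hypothetical descriptor type:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_cs_desc {
	__u8	bLength;
	__u8	bDescriptorType;
	__le16	wChannelConfig;		/* little-endian on the wire */
} __packed;

static struct example_cs_desc example_desc = {
	.bLength	 = sizeof(example_desc),
	.bDescriptorType = 0x24,		/* USB_DT_CS_INTERFACE */
	.wChannelConfig	 = cpu_to_le16(0x3),	/* swap when storing */
};

static inline u16 example_channels(void)
{
	return le16_to_cpu(example_desc.wChannelConfig); /* swap on read */
}
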
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 9082ce261e70..f05c3f3e6103 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -168,7 +168,7 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = {
.bAssocTerminal = 0,
.bCSourceID = USB_OUT_CLK_ID,
.iChannelNames = 0,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Input Terminal for I/O-In */
@@ -182,7 +182,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bAssocTerminal = 0,
.bCSourceID = USB_IN_CLK_ID,
.iChannelNames = 0,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Output Terminal for USB_IN */
@@ -196,7 +196,7 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
.bAssocTerminal = 0,
.bSourceID = IO_IN_IT_ID,
.bCSourceID = USB_IN_CLK_ID,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Output Terminal for I/O-Out */
@@ -210,7 +210,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bAssocTerminal = 0,
.bSourceID = USB_OUT_IT_ID,
.bCSourceID = USB_OUT_CLK_ID,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
static struct uac2_ac_header_descriptor ac_hdr_desc = {
@@ -220,9 +220,10 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = {
.bDescriptorSubtype = UAC_MS_HEADER,
.bcdADC = cpu_to_le16(0x200),
.bCategory = UAC2_FUNCTION_IO_BOX,
- .wTotalLength = sizeof in_clk_src_desc + sizeof out_clk_src_desc
- + sizeof usb_out_it_desc + sizeof io_in_it_desc
- + sizeof usb_in_ot_desc + sizeof io_out_ot_desc,
+ .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc
+ + sizeof out_clk_src_desc + sizeof usb_out_it_desc
+ + sizeof io_in_it_desc + sizeof usb_in_ot_desc
+ + sizeof io_out_ot_desc),
.bmControls = 0,
};
@@ -569,10 +570,12 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
return ret;
}
- agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize,
- hs_epin_desc.wMaxPacketSize);
- agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize,
- hs_epout_desc.wMaxPacketSize);
+ agdev->in_ep_maxpsize = max_t(u16,
+ le16_to_cpu(fs_epin_desc.wMaxPacketSize),
+ le16_to_cpu(hs_epin_desc.wMaxPacketSize));
+ agdev->out_ep_maxpsize = max_t(u16,
+ le16_to_cpu(fs_epout_desc.wMaxPacketSize),
+ le16_to_cpu(hs_epout_desc.wMaxPacketSize));
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
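
The f_uac2 hunk applies the same rule to computed values: wMaxPacketSize is __le16, so max() over the raw fields compares byte-swapped numbers on big-endian hosts and can select the smaller endpoint size. Converting both operands first and pinning the comparison type with max_t() is the fix; a sketch using the hunk's own descriptor names:

	/* Both operands converted to CPU byte order before comparing. */
	agdev->in_ep_maxpsize = max_t(u16,
			le16_to_cpu(fs_epin_desc.wMaxPacketSize),
			le16_to_cpu(hs_epin_desc.wMaxPacketSize));
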
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 9ffb11ec9ed9..7cd5c969fcbe 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -192,7 +192,7 @@ config USB_RENESAS_USBHS_UDC
config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
depends on ARCH_RENESAS || COMPILE_TEST
- depends on EXTCON
+ depends on EXTCON && HAS_DMA
help
Renesas USB3.0 Peripheral controller is a USB peripheral controller
that supports super, high, and full speed USB 3.0 data transfers.
@@ -257,6 +257,7 @@ config USB_MV_U3D
config USB_SNP_CORE
depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT)
+ depends on HAS_DMA
tristate
help
This enables core driver support for Synopsys USB 2.0 Device
@@ -271,7 +272,7 @@ config USB_SNP_CORE
config USB_SNP_UDC_PLAT
tristate "Synopsys USB 2.0 Device controller"
- depends on (USB_GADGET && OF)
+ depends on USB_GADGET && OF && HAS_DMA
select USB_GADGET_DUALSPEED
select USB_SNP_CORE
default ARCH_BCM_IPROC
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index d8278322d5ac..62dc9c7798e7 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -89,6 +89,9 @@
/* USB_COM_CON */
#define USB_COM_CON_CONF BIT(24)
+#define USB_COM_CON_PN_WDATAIF_NL BIT(23)
+#define USB_COM_CON_PN_RDATAIF_NL BIT(22)
+#define USB_COM_CON_PN_LSTTR_PP BIT(21)
#define USB_COM_CON_SPD_MODE BIT(17)
#define USB_COM_CON_EP0_EN BIT(16)
#define USB_COM_CON_DEV_ADDR_SHIFT 8
@@ -686,6 +689,9 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
{
usb3_init_axi_bridge(usb3);
usb3_init_epc_registers(usb3);
+ usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
+ USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
+ USB3_USB_COM_CON);
usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA);
usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA);
@@ -1369,7 +1375,7 @@ static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3,
usb3_for_each_dma(usb3, dma, i) {
if (dma->prd) {
- dma_free_coherent(dev, USB3_DMA_MAX_XFER_SIZE,
+ dma_free_coherent(dev, USB3_DMA_PRD_SIZE,
dma->prd, dma->prd_dma);
dma->prd = NULL;
}
@@ -1409,12 +1415,12 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
int ret = -EAGAIN;
u32 enable_bits = 0;
+ spin_lock_irqsave(&usb3->lock, flags);
if (usb3_ep->halt || usb3_ep->started)
- return;
+ goto out;
if (usb3_req != usb3_req_first)
- return;
+ goto out;
- spin_lock_irqsave(&usb3->lock, flags);
if (usb3_pn_change(usb3, usb3_ep->num) < 0)
goto out;
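
usb3_start_pipen() used to test usb3_ep->halt and ->started before taking usb3->lock, leaving a window where a completion on another CPU could flip them between the check and the critical section. The hunk widens the lock to cover the checks and converts the early returns into goto out so the unlock is never skipped. A self-contained sketch of the pattern with hypothetical types:

#include <linux/spinlock.h>

struct foo_dev { spinlock_t lock; };
struct foo_ep  { struct foo_dev *dev; bool halted, started; };

static void foo_start(struct foo_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (ep->halted || ep->started)	/* stable only under the lock */
		goto out;

	/* ... program the hardware ... */
	ep->started = true;
out:
	spin_unlock_irqrestore(&ep->dev->lock, flags);
}
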
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 2e11f19e07ae..f7b4d0f159e4 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -28,7 +28,7 @@
/* description */
#define UDC_MOD_DESCRIPTION "Synopsys UDC platform driver"
-void start_udc(struct udc *udc)
+static void start_udc(struct udc *udc)
{
if (udc->driver) {
dev_info(udc->dev, "Connecting...\n");
@@ -38,7 +38,7 @@ void start_udc(struct udc *udc)
}
}
-void stop_udc(struct udc *udc)
+static void stop_udc(struct udc *udc)
{
int tmp;
u32 reg;
@@ -76,7 +76,7 @@ void stop_udc(struct udc *udc)
dev_info(udc->dev, "Device disconnected\n");
}
-void udc_drd_work(struct work_struct *work)
+static void udc_drd_work(struct work_struct *work)
{
struct udc *udc;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9a1e4c40480..c8989c62a262 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -77,6 +77,16 @@
#define USB_INTEL_USB3_PSSEN 0xD8
#define USB_INTEL_USB3PRM 0xDC
+/* Registers used by the ASMEDIA flow-control quirk */
+#define ASMT_DATA_WRITE0_REG 0xF8
+#define ASMT_DATA_WRITE1_REG 0xFC
+#define ASMT_CONTROL_REG 0xE0
+#define ASMT_CONTROL_WRITE_BIT 0x02
+#define ASMT_WRITEREG_CMD 0x10423
+#define ASMT_FLOWCTL_ADDR 0xFA30
+#define ASMT_FLOWCTL_DATA 0xBA
+#define ASMT_PSEUDO_DATA 0
+
/*
* amd_chipset_gen values represent different AMD chipset generations
*/
@@ -412,6 +422,50 @@ void usb_amd_quirk_pll_disable(void)
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
+static int usb_asmedia_wait_write(struct pci_dev *pdev)
+{
+ unsigned long retry_count;
+ unsigned char value;
+
+ for (retry_count = 1000; retry_count > 0; --retry_count) {
+
+ pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
+
+ if (value == 0xff) {
+ dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
+ return -EIO;
+ }
+
+ if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+ return 0;
+
+ usleep_range(40, 60);
+ }
+
+ dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+ return -ETIMEDOUT;
+}
+
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
+{
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+
+ /* send command and address to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+
+ /* send data to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+}
+EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
+
void usb_amd_quirk_pll_enable(void)
{
usb_amd_quirk_pll(0);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 0222195bd5b0..655994480198 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -11,6 +11,7 @@ bool usb_amd_prefetch_quirk(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
@@ -18,6 +19,7 @@ void sb800_prefetch(struct device *dev, int on);
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
static inline void usb_amd_dev_put(void) {}
static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
static inline void sb800_prefetch(struct device *dev, int on) {}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1adae9eab831..00721e8807ab 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -398,14 +398,21 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
spin_lock_irqsave(&xhci->lock, flags);
for (i = LAST_EP_INDEX; i > 0; i--) {
if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+ struct xhci_ep_ctx *ep_ctx;
struct xhci_command *command;
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
+
+ /* Check ep is running, required by AMD SNPS 3.1 xHC */
+ if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
+ continue;
+
command = xhci_alloc_command(xhci, false, false,
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, cmd);
return -ENOMEM;
-
}
xhci_queue_stop_endpoint(xhci, command, slot_id, i,
suspend);
@@ -603,12 +610,14 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
/* Disable all Device Slots */
xhci_dbg(xhci, "Disable all slots\n");
+ spin_unlock_irqrestore(&xhci->lock, *flags);
for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
retval = xhci_disable_slot(xhci, NULL, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
}
+ spin_lock_irqsave(&xhci->lock, *flags);
/* Put all ports to the Disable state by clear PP */
xhci_dbg(xhci, "Disable all ports (PP = 0)\n");
/* Power off USB3 ports*/
@@ -897,6 +906,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
clear_bit(wIndex, &bus_state->resuming_ports);
set_bit(wIndex, &bus_state->rexit_ports);
+
+ xhci_test_and_clear_bit(xhci, port_array, wIndex,
+ PORT_PLC);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
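
In xhci_enter_test_mode() the slot-disable loop issues commands through xhci_disable_slot(), which cannot run with xhci->lock held, so the hunk releases the lock around the loop and reacquires it before touching the port registers again. The shape of the pattern, with the caller-supplied *flags as in the hunk and disable_slot_blocking() as an assumed helper; anything checked before the unlock must be revalidated after:

	spin_unlock_irqrestore(&xhci->lock, *flags);
	for (i = 1; i <= max_slots; i++)
		disable_slot_blocking(i);	/* may sleep; lock is dropped */
	spin_lock_irqsave(&xhci->lock, *flags);
	/* revalidate any state cached before the unlock */
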
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 53882e2babbb..5b0fa553c8bc 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,8 @@
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
+#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
+
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -217,6 +219,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x1142)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+ xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
+
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c50c902d009e..cc368ad2b51e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -864,13 +864,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
- for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
+ ring = ep->stream_info->stream_rings[stream_id];
+ if (!ring)
+ continue;
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
- slot_id, ep_index, stream_id + 1);
- xhci_kill_ring_urbs(xhci,
- ep->stream_info->stream_rings[stream_id]);
+ slot_id, ep_index, stream_id);
+ xhci_kill_ring_urbs(xhci, ring);
}
} else {
ring = ep->ring;
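
Two details drive the xhci-ring.c fix: stream ID 0 is reserved (it denotes the stream context array itself, not a transfer ring), and rings for higher stream IDs are allocated lazily and may be NULL. Iterating from 0 both logged off-by-one stream numbers (hence the old stream_id + 1) and could pass a NULL ring to xhci_kill_ring_urbs(). The corrected loop shape:

	/* Stream 0 is reserved; unallocated streams have no ring. */
	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
	     stream_id++) {
		ring = ep->stream_info->stream_rings[stream_id];
		if (!ring)
			continue;
		xhci_kill_ring_urbs(xhci, ring);
	}
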
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 56f85df013db..b2ff1ff1a02f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -198,6 +198,9 @@ int xhci_reset(struct xhci_hcd *xhci)
if (ret)
return ret;
+ if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+ usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
+
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wait for controller to be ready for doorbell rings");
/*
@@ -622,8 +625,10 @@ int xhci_run(struct usb_hcd *hcd)
if (!command)
return -ENOMEM;
- xhci_queue_vendor_command(xhci, command, 0, 0, 0,
+ ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
+ if (ret)
+ xhci_free_command(xhci, command);
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
@@ -1085,6 +1090,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
compliance_mode_recovery_timer_init(xhci);
+ if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+ usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
+
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
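
xhci_run() previously ignored the return value of xhci_queue_vendor_command(); when queueing fails no completion ever fires, so nothing frees the command structure and it leaks. The allocate/queue/free-on-error shape, condensed from the hunk (allocation flags abbreviated):

	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
					TRB_TYPE(TRB_NEC_GET_FW));
	if (ret)
		xhci_free_command(xhci, command); /* no completion will free it */
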
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c6da1f93c84..e3e935291ed6 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1820,6 +1820,7 @@ struct xhci_hcd {
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
#define XHCI_U2_DISABLE_WAKE (1 << 27)
+#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 623c51300393..f0ce304c5aaf 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -752,8 +752,10 @@ static int usbhsc_resume(struct device *dev)
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
usbhsc_power_ctrl(priv, 1);
+ usbhs_mod_autonomy_mode(priv);
+ }
usbhs_platform_call(priv, phy_reset, pdev);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 5bc7a6138855..93fba9033b00 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@ struct usbhsg_gpriv;
struct usbhsg_uep {
struct usb_ep ep;
struct usbhs_pipe *pipe;
+ spinlock_t lock; /* protect the pipe */
char ep_name[EP_NAME_SIZE];
@@ -636,10 +637,16 @@ usbhsg_ep_enable_end:
static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ int ret = 0;
- if (!pipe)
- return -EINVAL;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
+ if (!pipe) {
+ ret = -EINVAL;
+ goto out;
+ }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
@@ -647,6 +654,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
uep->pipe->mod_private = NULL;
uep->pipe = NULL;
+out:
+ spin_unlock_irqrestore(&uep->lock, flags);
+
- return 0;
+ return ret;
}
@@ -696,8 +706,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
if (pipe)
usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
@@ -706,6 +719,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
* even if the pipe is NULL.
*/
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
+ spin_unlock_irqrestore(&uep->lock, flags);
return 0;
}
@@ -852,10 +866,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
- struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhsg_uep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
- int ret = 0;
+ int ret = 0, i;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
@@ -887,7 +901,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhsg_ep_disable(&dcp->ep);
+ /* disable all eps */
+ usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
+ usbhsg_ep_disable(&uep->ep);
dev_dbg(dev, "stop gadget\n");
@@ -1069,6 +1085,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
+ spin_lock_init(&uep->lock);
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
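
The mod_gadget.c changes add a per-endpoint spinlock whose single job is to keep uep->pipe stable: ep_disable frees the pipe and NULLs the pointer while ep_dequeue dereferences it, and the two can race. Both paths now resolve the pointer under uep->lock. A condensed sketch (types abbreviated; note the disable path should return ret, per the fix above):

static int ep_disable_sketch(struct usbhsg_uep *uep)
{
	struct usbhs_pipe *pipe;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&uep->lock, flags);
	pipe = uep->pipe;	/* stable only while the lock is held */
	if (!pipe) {
		ret = -EINVAL;
		goto out;
	}
	/* ... disable and free the pipe ... */
	uep->pipe = NULL;
out:
	spin_unlock_irqrestore(&uep->lock, flags);
	return ret;
}
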
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fba4005dd737..6a7720e66595 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1529,8 +1529,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
/* Make sure driver was initialized */
- if (us->extra == NULL)
+ if (us->extra == NULL) {
usb_stor_dbg(us, "ERROR Driver not initialized\n");
+ srb->result = DID_ERROR << 16;
+ return;
+ }
scsi_set_resid(srb, 0);
/* scsi_bufflen might change in protocol translation to ata */
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 6b0d2f0918c6..8a88f45822e3 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -3,6 +3,7 @@
#define __DRIVER_USB_TYPEC_UCSI_H
#include <linux/bitops.h>
+#include <linux/device.h>
#include <linux/types.h>
/* -------------------------------------------------------------------------- */
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e4613a3c362d..9cb3f722dce1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -308,7 +308,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
- vq->last_used_event = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
@@ -1402,7 +1401,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EINVAL;
break;
}
- vq->last_avail_idx = vq->last_used_event = s.num;
+ vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
@@ -2241,6 +2240,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__u16 old, new;
__virtio16 event;
bool v;
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
- /* Flush out used index updates. This is paired
- * with the barrier that the Guest executes when enabling
- * interrupts. */
- smp_mb();
if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
@@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- /* We're sure if the following conditions are met, there's no
- * need to notify guest:
- * 1) cached used event is ahead of new
- * 2) old to new updating does not cross cached used event. */
- if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
- !vring_need_event(vq->last_used_event, new, old))
- return false;
-
- /* Flush out used index updates. This is paired
- * with the barrier that the Guest executes when enabling
- * interrupts. */
- smp_mb();
-
if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- vq->last_used_event = vhost16_to_cpu(vq, event);
-
- return vring_need_event(vq->last_used_event, new, old);
+ return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
/* This actually signals the guest, using eventfd. */
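
The vhost_notify() rework hoists smp_mb() to the very top so the host's used-index store is ordered before any read of the guest's avail flags or event index on every exit path, and drops the stale last_used_event cache in favour of reading the event index fresh each time. The barrier pairing, schematically; vq_sketch and its field are assumptions, while vring_need_event() is the real helper from virtio_ring.h:

#include <linux/compiler.h>
#include <linux/virtio_ring.h>
#include <asm/barrier.h>

struct vq_sketch { u16 guest_event_idx; };	/* assumed layout */

static bool should_signal(struct vq_sketch *vq, u16 old, u16 new)
{
	/* Pairs with the guest's barrier between writing its event
	 * index and re-checking used->idx: our used->idx store must be
	 * visible before we read the event index, or both sides can
	 * conclude no notification is needed and the ring stalls. */
	smp_mb();

	return vring_need_event(READ_ONCE(vq->guest_event_idx), new, old);
}
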
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f72095868b93..bb7c29b8b9fc 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -115,9 +115,6 @@ struct vhost_virtqueue {
/* Last index we used. */
u16 last_used_idx;
- /* Last used evet we've seen */
- u16 last_used_event;
-
/* Used flags */
u16 used_flags;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 22caf808bfab..f0b3a0b9d42f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -104,12 +104,6 @@ static u32 page_to_balloon_pfn(struct page *page)
return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}
-static struct page *balloon_pfn_to_page(u32 pfn)
-{
- BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
- return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
-}
-
static void balloon_ack(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
@@ -138,8 +132,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
{
unsigned int i;
- /* Set balloon pfns pointing at this page.
- * Note that the first pfn points at start of the page. */
+ /*
+ * Set balloon pfns pointing at this page.
+ * Note that the first pfn points at start of the page.
+ */
for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
pfns[i] = cpu_to_virtio32(vb->vdev,
page_to_balloon_pfn(page) + i);
@@ -182,18 +178,16 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
return num_allocated_pages;
}
-static void release_pages_balloon(struct virtio_balloon *vb)
+static void release_pages_balloon(struct virtio_balloon *vb,
+ struct list_head *pages)
{
- unsigned int i;
- struct page *page;
+ struct page *page, *next;
- /* Find pfns pointing at start of each page, get pages and free them. */
- for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
- vb->pfns[i]));
+ list_for_each_entry_safe(page, next, pages, lru) {
if (!virtio_has_feature(vb->vdev,
VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
adjust_managed_page_count(page, 1);
+ list_del(&page->lru);
put_page(page); /* balloon reference */
}
}
@@ -203,6 +197,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
unsigned num_freed_pages;
struct page *page;
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+ LIST_HEAD(pages);
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
@@ -216,6 +211,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
if (!page)
break;
set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ list_add(&page->lru, &pages);
vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
}
@@ -227,7 +223,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
*/
if (vb->num_pfns != 0)
tell_host(vb, vb->deflate_vq);
- release_pages_balloon(vb);
+ release_pages_balloon(vb, &pages);
mutex_unlock(&vb->balloon_lock);
return num_freed_pages;
}
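
The virtio_balloon change switches deflate from reconstructing struct page pointers out of the PFN array (the deleted balloon_pfn_to_page()) to threading each dequeued page onto a local list and releasing that list afterwards, so the page pointers are authoritative rather than round-tripped through PFN encoding. A sketch of the release side:

#include <linux/list.h>
#include <linux/mm.h>

/* Pages were list_add()ed to @pages as they were dequeued. */
static void release_pages_sketch(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		list_del(&page->lru);	/* _safe iteration caches next */
		put_page(page);		/* drop the balloon's reference */
	}
}
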
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3612542b6044..83fc9aab34e8 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -704,7 +704,8 @@ static int omap_hdq_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- ret = -ENXIO;
+ dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ ret = irq;
goto err_irq;
}
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 95ea7e6b1d99..74471e7aa5cc 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -728,6 +728,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
atomic_set(&sl->refcnt, 1);
atomic_inc(&sl->master->refcnt);
+ dev->slave_count++;
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
@@ -747,11 +748,11 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
sl->family = f;
-
err = __w1_attach_slave_device(sl);
if (err < 0) {
dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
sl->name);
+ dev->slave_count--;
w1_family_put(sl->family);
atomic_dec(&sl->master->refcnt);
kfree(sl);
@@ -759,7 +760,6 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
}
sl->ttl = dev->slave_ttl;
- dev->slave_count++;
memcpy(msg.id.id, rn, sizeof(msg.id));
msg.type = W1_SLAVE_ADD;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 50dcb68d8070..ab609255a0f3 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -780,6 +780,9 @@ static int __init balloon_init(void)
}
#endif
+ /* Init the xen-balloon driver. */
+ xen_balloon_init();
+
return 0;
}
subsys_initcall(balloon_init);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b241bfa529ce..bae1f5d36c26 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -343,14 +343,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
info->cpu = cpu;
}
-static void xen_evtchn_mask_all(void)
-{
- unsigned int evtchn;
-
- for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
- mask_evtchn(evtchn);
-}
-
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
* @irq: irq of event channel to send event to
@@ -1573,7 +1565,6 @@ void xen_irq_resume(void)
struct irq_info *info;
/* New event-channel space is not 'live' yet. */
- xen_evtchn_mask_all();
xen_evtchn_resume();
/* No IRQ <-> event-channel mappings. */
@@ -1681,6 +1672,7 @@ module_param(fifo_events, bool, 0);
void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
+ unsigned int evtchn;
if (fifo_events)
ret = xen_evtchn_fifo_init();
@@ -1692,7 +1684,8 @@ void __init xen_init_IRQ(void)
BUG_ON(!evtchn_to_irq);
/* No event channels are 'live' right now. */
- xen_evtchn_mask_all();
+ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
+ mask_evtchn(evtchn);
pirq_needs_eoi = pirq_needs_eoi_flag;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index d6786b87e13b..2c6a9114d332 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -1072,8 +1073,14 @@ static int gnttab_expand(unsigned int req_entries)
cur = nr_grant_frames;
extra = ((req_entries + (grefs_per_grant_frame-1)) /
grefs_per_grant_frame);
- if (cur + extra > gnttab_max_grant_frames())
+ if (cur + extra > gnttab_max_grant_frames()) {
+ pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
+ " cur=%u extra=%u limit=%u"
+ " gnttab_free_count=%u req_entries=%u\n",
+ cur, extra, gnttab_max_grant_frames(),
+ gnttab_free_count, req_entries);
return -ENOSPC;
+ }
rc = gnttab_map(cur, cur + extra - 1);
if (rc == 0)
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e7715cb62eef..e89136ab851e 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -59,6 +59,8 @@ static void watch_target(struct xenbus_watch *watch,
{
unsigned long long new_target;
int err;
+ static bool watch_fired;
+ static long target_diff;
err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
if (err != 1) {
@@ -69,7 +71,14 @@ static void watch_target(struct xenbus_watch *watch,
/* The given memory/target value is in KiB, so it needs converting to
* pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
*/
- balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+ new_target >>= PAGE_SHIFT - 10;
+ if (watch_fired) {
+ balloon_set_new_target(new_target - target_diff);
+ return;
+ }
+
+ watch_fired = true;
+ target_diff = new_target - balloon_stats.target_pages;
}
static struct xenbus_watch target_watch = {
.node = "memory/target",
@@ -94,22 +103,15 @@ static struct notifier_block xenstore_notifier = {
.notifier_call = balloon_init_watcher,
};
-static int __init balloon_init(void)
+void xen_balloon_init(void)
{
- if (!xen_domain())
- return -ENODEV;
-
- pr_info("Initialising balloon driver\n");
-
register_balloon(&balloon_dev);
register_xen_selfballooning(&balloon_dev);
register_xenstore_notifier(&xenstore_notifier);
-
- return 0;
}
-subsys_initcall(balloon_init);
+EXPORT_SYMBOL_GPL(xen_balloon_init);
#define BALLOON_SHOW(name, format, args...) \
static ssize_t show_##name(struct device *dev, \
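
The watch_target() change calibrates on the first xenstore event: it records the offset between the target Xen publishes in memory/target and the pages the balloon already tracks, then subtracts that offset from every subsequent target, so a toolstack writing absolute totals cannot balloon the guest below its actual allocation. The bookkeeping, sketched with the target already converted to pages:

static void watch_target_sketch(unsigned long long new_target_pages)
{
	static bool watch_fired;
	static long target_diff;

	if (!watch_fired) {
		/* First event: remember how far Xen's notion of the
		 * target sits from the pages we actually account for. */
		watch_fired = true;
		target_diff = new_target_pages - balloon_stats.target_pages;
		return;
	}
	balloon_set_new_target(new_target_pages - target_diff);
}
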
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 66620713242a..a67e955cacd1 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -151,8 +151,8 @@ static unsigned long frontswap_inertia_counter;
static void frontswap_selfshrink(void)
{
static unsigned long cur_frontswap_pages;
- static unsigned long last_frontswap_pages;
- static unsigned long tgt_frontswap_pages;
+ unsigned long last_frontswap_pages;
+ unsigned long tgt_frontswap_pages;
last_frontswap_pages = cur_frontswap_pages;
cur_frontswap_pages = frontswap_curr_pages();
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 967f069385d0..71ddfb4cf61c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -87,7 +87,6 @@ static int __init xenfs_init(void)
if (xen_domain())
return register_filesystem(&xenfs_type);
- pr_info("not registering filesystem on non-xen platform\n");
return 0;
}