Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 6
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/scan.c | 17
-rw-r--r--  drivers/acpi/x86/s2idle.c | 14
-rw-r--r--  drivers/atm/idt77252.c | 2
-rw-r--r--  drivers/base/core.c | 53
-rw-r--r--  drivers/base/dd.c | 9
-rw-r--r--  drivers/base/platform.c | 2
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 9
-rw-r--r--  drivers/block/Kconfig | 1
-rw-r--r--  drivers/block/rnbd/Kconfig | 1
-rw-r--r--  drivers/block/rnbd/README | 1
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 18
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c | 8
-rw-r--r--  drivers/block/xen-blkfront.c | 20
-rw-r--r--  drivers/bus/arm-integrator-lm.c | 1
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 9
-rw-r--r--  drivers/clk/imx/Kconfig | 2
-rw-r--r--  drivers/clk/mmp/clk-audio.c | 6
-rw-r--r--  drivers/clk/qcom/gcc-sc7180.c | 21
-rw-r--r--  drivers/clk/qcom/gcc-sm8250.c | 4
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 2
-rw-r--r--  drivers/counter/ti-eqep.c | 35
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 16
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 9
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.h | 4
-rw-r--r--  drivers/dma-buf/dma-buf.c | 21
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c | 3
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c | 4
-rw-r--r--  drivers/dma/idxd/sysfs.c | 4
-rw-r--r--  drivers/dma/mediatek/mtk-hsdma.c | 1
-rw-r--r--  drivers/dma/milbeaut-xdmac.c | 4
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 6
-rw-r--r--  drivers/dma/qcom/gpi.c | 10
-rw-r--r--  drivers/dma/stm32-mdma.c | 2
-rw-r--r--  drivers/dma/ti/k3-udma.c | 6
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 11
-rw-r--r--  drivers/firmware/imx/Kconfig | 1
-rw-r--r--  drivers/gpio/Kconfig | 5
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 19
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 145
-rw-r--r--  drivers/gpio/gpiolib.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 145
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 38
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 166
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 9
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 8
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c | 157
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 81
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_mitigations.c | 146
-rw-r--r--  drivers/gpu/drm/i915/i915_mitigations.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 37
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 21
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 7
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/disp.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 87
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c | 118
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c | 74
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 35
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 1
-rw-r--r--  drivers/hid/Kconfig | 1
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_client.c | 8
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_hid.h | 2
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 2
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.h | 2
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-input.c | 2
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 4
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 2
-rw-r--r--  drivers/hid/hid-multitouch.c | 7
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 2
-rw-r--r--  drivers/hid/hid-wiimote-core.c | 2
-rw-r--r--  drivers/hid/wacom_sys.c | 40
-rw-r--r--  drivers/hid/wacom_wac.h | 2
-rw-r--r--  drivers/hv/vmbus_drv.c | 2
-rw-r--r--  drivers/hwmon/amd_energy.c | 3
-rw-r--r--  drivers/hwmon/pwm-fan.c | 12
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r--  drivers/hwtracing/stm/heartbeat.c | 6
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 20
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 27
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-tegra-bpmp.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 24
-rw-r--r--  drivers/ide/ide-atapi.c | 1
-rw-r--r--  drivers/ide/ide-io.c | 7
-rw-r--r--  drivers/ide/ide-pm.c | 2
-rw-r--r--  drivers/idle/intel_idle.c | 41
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 6
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_trigger.c | 31
-rw-r--r--  drivers/iio/dac/ad5504.c | 4
-rw-r--r--  drivers/iio/proximity/sx9310.c | 5
-rw-r--r--  drivers/iio/temperature/mlx90632.c | 6
-rw-r--r--  drivers/infiniband/core/cma_configfs.c | 4
-rw-r--r--  drivers/infiniband/core/restrack.c | 1
-rw-r--r--  drivers/infiniband/core/ucma.c | 135
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 10
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | 7
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 14
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 5
-rw-r--r--  drivers/interconnect/imx/imx.c | 3
-rw-r--r--  drivers/interconnect/imx/imx8mq.c | 2
-rw-r--r--  drivers/interconnect/qcom/Kconfig | 23
-rw-r--r--  drivers/iommu/amd/init.c | 3
-rw-r--r--  drivers/iommu/amd/iommu.c | 3
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 4
-rw-r--r--  drivers/iommu/dma-iommu.c | 27
-rw-r--r--  drivers/iommu/intel/dmar.c | 4
-rw-r--r--  drivers/iommu/intel/iommu.c | 149
-rw-r--r--  drivers/iommu/intel/irq_remapping.c | 2
-rw-r--r--  drivers/iommu/intel/svm.c | 45
-rw-r--r--  drivers/iommu/iova.c | 8
-rw-r--r--  drivers/irqchip/Kconfig | 5
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 4
-rw-r--r--  drivers/irqchip/irq-loongson-liointc.c | 4
-rw-r--r--  drivers/irqchip/irq-mips-cpu.c | 7
-rw-r--r--  drivers/irqchip/irq-sl28cpld.c | 2
-rw-r--r--  drivers/isdn/mISDN/Kconfig | 1
-rw-r--r--  drivers/lightnvm/Kconfig | 1
-rw-r--r--  drivers/lightnvm/core.c | 3
-rw-r--r--  drivers/md/Kconfig | 2
-rw-r--r--  drivers/md/bcache/features.c | 2
-rw-r--r--  drivers/md/bcache/features.h | 30
-rw-r--r--  drivers/md/bcache/super.c | 53
-rw-r--r--  drivers/md/dm-bufio.c | 6
-rw-r--r--  drivers/md/dm-crypt.c | 177
-rw-r--r--  drivers/md/dm-integrity.c | 92
-rw-r--r--  drivers/md/dm-raid.c | 6
-rw-r--r--  drivers/md/dm-snap.c | 24
-rw-r--r--  drivers/md/dm-table.c | 15
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/media/cec/platform/Makefile | 1
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-v4l2.c | 3
-rw-r--r--  drivers/media/i2c/ccs-pll.c | 8
-rw-r--r--  drivers/media/i2c/ccs/ccs-data.c | 2
-rw-r--r--  drivers/media/pci/intel/ipu3/ipu3-cio2.c | 2
-rw-r--r--  drivers/media/platform/qcom/venus/core.c | 2
-rw-r--r--  drivers/media/platform/rcar-vin/rcar-core.c | 2
-rw-r--r--  drivers/media/rc/ir-mce_kbd-decoder.c | 2
-rw-r--r--  drivers/media/rc/ite-cir.c | 2
-rw-r--r--  drivers/media/rc/rc-main.c | 8
-rw-r--r--  drivers/media/rc/serial_ir.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-common.c | 4
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 7
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 77
-rw-r--r--  drivers/misc/habanalabs/common/device.c | 19
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c | 65
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h | 5
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_drv.c | 1
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_ioctl.c | 9
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c | 5
-rw-r--r--  drivers/misc/habanalabs/common/memory.c | 10
-rw-r--r--  drivers/misc/habanalabs/common/mmu.c | 6
-rw-r--r--  drivers/misc/habanalabs/common/mmu_v1.c | 12
-rw-r--r--  drivers/misc/habanalabs/common/pci.c | 28
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 194
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudiP.h | 7
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 3
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 78
-rw-r--r--  drivers/misc/habanalabs/include/common/hl_boot_if.h | 9
-rw-r--r--  drivers/misc/pvpanic.c | 19
-rw-r--r--  drivers/mmc/core/queue.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-brcmstb.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-of-dwcmshc.c | 27
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 7
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 2
-rw-r--r--  drivers/mtd/nand/raw/intel-nand-controller.c | 5
-rw-r--r--  drivers/mtd/nand/raw/nandsim.c | 7
-rw-r--r--  drivers/mtd/nand/raw/omap2.c | 15
-rw-r--r--  drivers/mtd/nand/spi/core.c | 14
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/arcnet/com20020_cs.c | 2
-rw-r--r--  drivers/net/bareudp.c | 32
-rw-r--r--  drivers/net/bonding/bond_main.c | 174
-rw-r--r--  drivers/net/bonding/bond_options.c | 55
-rw-r--r--  drivers/net/can/Kconfig | 1
-rw-r--r--  drivers/net/can/Makefile | 7
-rw-r--r--  drivers/net/can/at91_can.c | 4
-rw-r--r--  drivers/net/can/c_can/c_can.c | 4
-rw-r--r--  drivers/net/can/cc770/cc770.c | 4
-rw-r--r--  drivers/net/can/dev.c | 1338
-rw-r--r--  drivers/net/can/dev/Makefile | 11
-rw-r--r--  drivers/net/can/dev/bittiming.c | 261
-rw-r--r--  drivers/net/can/dev/dev.c | 468
-rw-r--r--  drivers/net/can/dev/length.c | 95
-rw-r--r--  drivers/net/can/dev/netlink.c | 379
-rw-r--r--  drivers/net/can/dev/rx-offload.c (renamed from drivers/net/can/rx-offload.c) | 5
-rw-r--r--  drivers/net/can/dev/skb.c | 231
-rw-r--r--  drivers/net/can/flexcan.c | 130
-rw-r--r--  drivers/net/can/grcan.c | 4
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 4
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 6
-rw-r--r--  drivers/net/can/m_can/Makefile | 4
-rw-r--r--  drivers/net/can/m_can/m_can.c | 10
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c (renamed from drivers/net/can/m_can/tcan4x5x.c) | 148
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-regmap.c | 135
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.h | 57
-rw-r--r--  drivers/net/can/mscan/mscan.c | 4
-rw-r--r--  drivers/net/can/pch_can.c | 4
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 4
-rw-r--r--  drivers/net/can/rcar/Kconfig | 4
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 4
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 4
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 4
-rw-r--r--  drivers/net/can/softing/softing_main.c | 4
-rw-r--r--  drivers/net/can/spi/hi311x.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 98
-rw-r--r--  drivers/net/can/sun4i_can.c | 4
-rw-r--r--  drivers/net/can/ti_hecc.c | 4
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 4
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 4
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 4
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 2
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 10
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 8
-rw-r--r--  drivers/net/can/usb/ucan.c | 4
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 4
-rw-r--r--  drivers/net/can/vxcan.c | 6
-rw-r--r--  drivers/net/can/xilinx_can.c | 6
-rw-r--r--  drivers/net/dsa/Kconfig | 2
-rw-r--r--  drivers/net/dsa/Makefile | 1
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 113
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 16
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 46
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 10
-rw-r--r--  drivers/net/dsa/bcm_sf2_regs.h | 1
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 74
-rw-r--r--  drivers/net/dsa/hirschmann/Kconfig | 1
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 348
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.h | 17
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 12
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 137
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 108
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 98
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 29
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 8
-rw-r--r--  drivers/net/dsa/mt7530.c | 164
-rw-r--r--  drivers/net/dsa/mt7530.h | 20
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Kconfig | 13
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Makefile | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 464
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 65
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 187
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 21
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 5
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 276
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 37
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 20
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 165
-rw-r--r--  drivers/net/dsa/qca8k.c | 38
-rw-r--r--  drivers/net/dsa/realtek-smi-core.h | 9
-rw-r--r--  drivers/net/dsa/rtl8366.c | 152
-rw-r--r--  drivers/net/dsa/rtl8366rb.c | 276
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 3
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c | 9
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 101
-rw-r--r--  drivers/net/dsa/xrs700x/Kconfig | 26
-rw-r--r--  drivers/net/dsa/xrs700x/Makefile | 4
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 622
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.h | 42
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_i2c.c | 150
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_mdio.c | 163
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_reg.h | 203
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 10
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/aurora/Kconfig | 23
-rw-r--r--  drivers/net/ethernet/aurora/Makefile | 2
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 1520
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.h | 316
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 186
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.h | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 89
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 37
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 114
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 284
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 29
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 249
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 59
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/unimac.h | 68
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 13
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 38
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 7
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 108
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 41
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/ethoc.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 133
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 13
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 135
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 17
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 93
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 61
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 552
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 197
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 92
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 46
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 59
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 32
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 14
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 33
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 43
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 102
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 652
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h | 27
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 27
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 54
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 170
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 155
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 71
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 43
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 207
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 984
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.h | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 117
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 457
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 215
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 276
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/events.c | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qos.c | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qos.h | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 275
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 101
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 556
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c | 233
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c | 189
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 579
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 106
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 1592
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 167
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 1640
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 182
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 165
-rw-r--r--  drivers/net/ethernet/micrel/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.h | 2
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c | 114
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_par.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_spi.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 66
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.h | 20
-rw-r--r--  drivers/net/ethernet/mscc/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 73
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 10
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_devlink.c | 885
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 281
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 143
-rw-r--r--  drivers/net/ethernet/natsemi/macsonic.c | 12
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 14
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 83
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 49
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 29
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 196
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 397
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 37
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 6
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 30
-rw-r--r--  drivers/net/ethernet/rocker/rocker.h | 6
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 61
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 45
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 10
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 10
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 93
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 129
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 52
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 20
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 96
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 18
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 7
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 18
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.c | 70
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 8
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 18
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 3
-rw-r--r--  drivers/net/geneve.c | 19
-rw-r--r--  drivers/net/gtp.c | 527
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 93
-rw-r--r--  drivers/net/hyperv/netvsc.c | 60
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c | 14
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 56
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 246
-rw-r--r--  drivers/net/ipa/Kconfig | 10
-rw-r--r--  drivers/net/ipa/gsi.c | 274
-rw-r--r--  drivers/net/ipa/gsi_trans.h | 1
-rw-r--r--  drivers/net/ipa/ipa.h | 4
-rw-r--r--  drivers/net/ipa/ipa_clock.c | 194
-rw-r--r--  drivers/net/ipa/ipa_cmd.c | 45
-rw-r--r--  drivers/net/ipa/ipa_cmd.h | 24
-rw-r--r--  drivers/net/ipa/ipa_data-sc7180.c | 38
-rw-r--r--  drivers/net/ipa/ipa_data-sdm845.c | 38
-rw-r--r--  drivers/net/ipa/ipa_data.h | 26
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 83
-rw-r--r--  drivers/net/ipa/ipa_main.c | 39
-rw-r--r--  drivers/net/ipa/ipa_modem.c | 1
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/mdio/mdio-bitbang.c | 6
-rw-r--r--  drivers/net/mhi_net.c | 20
-rw-r--r--  drivers/net/netdevsim/fib.c | 23
-rw-r--r--  drivers/net/netdevsim/netdev.c | 2
-rw-r--r--  drivers/net/phy/at803x.c | 84
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 2
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/phy/micrel.c | 2
-rw-r--r--  drivers/net/phy/national.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/phy/realtek.c | 132
-rw-r--r--  drivers/net/phy/sfp-bus.c | 35
-rw-r--r--  drivers/net/phy/sfp.c | 208
-rw-r--r--  drivers/net/phy/smsc.c | 3
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 12
-rw-r--r--  drivers/net/ppp/pptp.c | 8
-rw-r--r--  drivers/net/tap.c | 12
-rw-r--r--  drivers/net/team/team.c | 6
-rw-r--r--  drivers/net/tun.c | 22
-rw-r--r--  drivers/net/usb/Kconfig | 1
-rw-r--r--  drivers/net/usb/cdc_ether.c | 13
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 23
-rw-r--r--  drivers/net/usb/hso.c | 3
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 87
-rw-r--r--  drivers/net/usb/r8152.c | 1
-rw-r--r--  drivers/net/usb/r8153_ecm.c | 8
-rw-r--r--  drivers/net/usb/rndis_host.c | 2
-rw-r--r--  drivers/net/usb/usbnet.c | 21
-rw-r--r--  drivers/net/veth.c | 14
-rw-r--r--  drivers/net/virtio_net.c | 30
-rw-r--r--  drivers/net/vxlan.c | 20
-rw-r--r--  drivers/net/wan/Kconfig | 1
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 7
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 4
-rw-r--r--  drivers/net/wan/sbni.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 44
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/peer.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath11k/peer.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c | 24
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/core.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 11
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 24
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 6
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 65
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.c | 56
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 53
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/queue/tx.c | 55
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 74
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 10
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/init.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 10
-rw-r--r--  drivers/net/wireless/mediatek/mt76/sdio.c | 19
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/dma.c | 5
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/mon.c | 4
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/netdev.c | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 8
-rw-r--r--  drivers/net/xen-netback/common.h | 3
-rw-r--r--  drivers/net/xen-netback/interface.c | 4
-rw-r--r--  drivers/net/xen-netback/netback.c | 5
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 4
-rw-r--r--  drivers/net/xen-netfront.c | 16
-rw-r--r--  drivers/nfc/Kconfig | 11
-rw-r--r--  drivers/nfc/Makefile | 1
-rw-r--r--  drivers/nfc/fdp/i2c.c | 2
-rw-r--r--  drivers/nfc/trf7970a.c | 2
-rw-r--r--  drivers/nfc/virtual_ncidev.c | 215
-rw-r--r--  drivers/nvme/host/core.c | 36
-rw-r--r--  drivers/nvme/host/fc.c | 15
-rw-r--r--  drivers/nvme/host/nvme.h | 9
-rw-r--r--  drivers/nvme/host/pci.c | 129
-rw-r--r--  drivers/nvme/host/rdma.c | 15
-rw-r--r--  drivers/nvme/host/tcp.c | 30
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 8
-rw-r--r--  drivers/nvme/target/fcloop.c | 7
-rw-r--r--  drivers/nvme/target/rdma.c | 26
-rw-r--r--  drivers/opp/core.c | 9
-rw-r--r--  drivers/perf/arm_pmu.c | 5
-rw-r--r--  drivers/phy/ingenic/Makefile | 2
-rw-r--r--  drivers/phy/mediatek/Kconfig | 4
-rw-r--r--  drivers/phy/motorola/phy-cpcap-usb.c | 19
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 2
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c | 4
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-nomadik.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 80
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 96
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.h | 2
-rw-r--r--  drivers/platform/surface/Kconfig | 8
-rw-r--r--  drivers/platform/surface/surface_gpe.c | 4
-rw-r--r--  drivers/platform/x86/amd-pmc.c | 2
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 3
-rw-r--r--  drivers/platform/x86/i2c-multi-instantiate.c | 31
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 15
-rw-r--r--  drivers/platform/x86/intel-vbtn.c | 10
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 5
-rw-r--r--  drivers/platform/x86/touchscreen_dmi.c | 18
-rw-r--r--  drivers/ptp/Kconfig | 2
-rw-r--r--  drivers/regulator/Kconfig | 1
-rw-r--r--  drivers/regulator/bd718x7-regulator.c | 57
-rw-r--r--  drivers/regulator/core.c | 44
-rw-r--r--  drivers/regulator/pf8x00-regulator.c | 8
-rw-r--r--  drivers/regulator/qcom-rpmh-regulator.c | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 47
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 141
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 10
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 8
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 94
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/Kconfig | 1
-rw-r--r--  drivers/scsi/fnic/vnic_dev.c | 8
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 66
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 8
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 16
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 45
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 29
-rw-r--r--  drivers/scsi/mpt3sas/Kconfig | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 4
-rw-r--r--  drivers/scsi/scsi_debug.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 27
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 27
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 9
-rw-r--r--  drivers/scsi/sd.c | 6
-rw-r--r--  drivers/scsi/ufs/Kconfig | 1
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek-trace.h | 2
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 21
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.h | 1
-rw-r--r--  drivers/scsi/ufs/ufs.h | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 73
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 102
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 14
-rw-r--r--  drivers/sh/intc/core.c | 2
-rw-r--r--  drivers/sh/intc/virq-debugfs.c | 14
-rw-r--r--  drivers/soc/atmel/soc.c | 13
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c | 20
-rw-r--r--  drivers/soc/imx/Kconfig | 2
-rw-r--r--  drivers/soc/litex/litex_soc_ctrl.c | 3
-rw-r--r--  drivers/spi/spi-altera.c | 29
-rw-r--r--  drivers/spi/spi-cadence.c | 6
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 5
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 84
-rw-r--r--  drivers/spi/spi-stm32.c | 4
-rw-r--r--  drivers/spi/spi.c | 11
-rw-r--r--  drivers/spi/spidev.c | 1
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 4
-rw-r--r--  drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 115
-rw-r--r--  drivers/staging/hikey9xx/hisi-spmi-controller.c | 21
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp_subdev.c | 20
-rw-r--r--  drivers/staging/media/hantro/hantro_v4l2.c | 2
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_h264.c | 2
-rw-r--r--  drivers/staging/mt7621-dma/mtk-hsdma.c | 4
-rw-r--r--  drivers/staging/rtl8723bs/include/rtw_wifi_regd.h | 6
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 6
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/wifi_regd.c | 10
-rw-r--r--  drivers/target/target_core_user.c | 11
-rw-r--r--  drivers/target/target_core_xcopy.c | 119
-rw-r--r--  drivers/target/target_core_xcopy.h | 1
-rw-r--r--  drivers/tee/optee/call.c | 4
-rw-r--r--  drivers/thunderbolt/icm.c | 2
-rw-r--r--  drivers/tty/Kconfig | 14
-rw-r--r--  drivers/tty/Makefile | 3
-rw-r--r--  drivers/tty/n_tty.c | 7
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 10
-rw-r--r--  drivers/tty/serial/sifive.c | 1
-rw-r--r--  drivers/tty/tty_io.c | 51
-rw-r--r--  drivers/tty/ttynull.c | 18
-rw-r--r--  drivers/usb/cdns3/cdns3-imx.c | 22
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 6
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 16
-rw-r--r--  drivers/usb/class/usblp.c | 21
-rw-r--r--  drivers/usb/core/hcd.c | 8
-rw-r--r--  drivers/usb/dwc3/core.h | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-meson-g12a.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 16
-rw-r--r--  drivers/usb/dwc3/ulpi.c | 38
-rw-r--r--  drivers/usb/gadget/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/composite.c | 10
-rw-r--r--  drivers/usb/gadget/configfs.c | 19
-rw-r--r--  drivers/usb/gadget/function/f_printer.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 69
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 9
-rw-r--r--  drivers/usb/gadget/legacy/acm_ms.c | 4
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/udc/Makefile | 1
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/epn.c | 5
-rw-r--r--  drivers/usb/gadget/udc/bdc/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/udc/core.c | 16
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 45
-rw-r--r--  drivers/usb/gadget/udc/fsl_mxc_udc.c | 122
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 12
-rw-r--r--  drivers/usb/host/ehci-hub.c | 3
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 7
-rw-r--r--  drivers/usb/host/xhci.c | 24
-rw-r--r--  drivers/usb/misc/yurex.c | 3
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 20
-rw-r--r--  drivers/usb/serial/option.c | 3
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/typec/altmodes/Kconfig | 2
-rw-r--r--  drivers/usb/typec/class.c | 2
-rw-r--r--  drivers/usb/typec/mux/intel_pmc_mux.c | 11
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 2
-rw-r--r--  drivers/vhost/net.c | 36
-rw-r--r--  drivers/vhost/vsock.c | 68
-rw-r--r--  drivers/xen/events/events_base.c | 10
-rw-r--r--  drivers/xen/platform-pci.c | 8
-rw-r--r--  drivers/xen/privcmd.c | 25
-rw-r--r--  drivers/xen/xenbus/xenbus.h | 1
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c | 8
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 110
956 files changed, 25902 insertions(+), 13710 deletions(-)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index edf1558c1105..ebcf534514be 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -395,9 +395,6 @@ config ACPI_CONTAINER
This helps support hotplug of nodes, CPUs, and memory.
- To compile this driver as a module, choose M here:
- the module will be called container.
-
config ACPI_HOTPLUG_MEMORY
bool "Memory Hotplug"
depends on MEMORY_HOTPLUG
@@ -411,9 +408,6 @@ config ACPI_HOTPLUG_MEMORY
removing memory devices at runtime, you need not enable
this driver.
- To compile this driver as a module, choose M here:
- the module will be called acpi_memhotplug.
-
config ACPI_HOTPLUG_IOAPIC
bool
depends on PCI
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index cb229e24c563..e6a5d997241c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
struct acpi_device_bus_id {
- char bus_id[15];
+ const char *bus_id;
unsigned int instance_no;
struct list_head node;
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 80b668c80073..1db063b02f63 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
acpi_device_bus_id->instance_no--;
else {
list_del(&acpi_device_bus_id->node);
+ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
@@ -585,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
if (!device)
return -EINVAL;
+ *device = NULL;
+
status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
@@ -674,7 +677,14 @@ int acpi_device_add(struct acpi_device *device,
}
if (!found) {
acpi_device_bus_id = new_bus_id;
- strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
+ acpi_device_bus_id->bus_id =
+ kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+ if (!acpi_device_bus_id->bus_id) {
+ pr_err(PREFIX "Memory allocation error for bus id\n");
+ result = -ENOMEM;
+ goto err_free_new_bus_id;
+ }
+
acpi_device_bus_id->instance_no = 0;
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
@@ -709,6 +719,11 @@ int acpi_device_add(struct acpi_device *device,
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
+
+ err_free_new_bus_id:
+ if (!found)
+ kfree(new_bus_id);
+
mutex_unlock(&acpi_device_lock);
err_detach:
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 25fea34b544c..2b69536cdccb 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -105,18 +105,8 @@ static void lpi_device_get_constraints_amd(void)
for (i = 0; i < out_obj->package.count; i++) {
union acpi_object *package = &out_obj->package.elements[i];
- struct lpi_device_info_amd info = { };
- if (package->type == ACPI_TYPE_INTEGER) {
- switch (i) {
- case 0:
- info.revision = package->integer.value;
- break;
- case 1:
- info.count = package->integer.value;
- break;
- }
- } else if (package->type == ACPI_TYPE_PACKAGE) {
+ if (package->type == ACPI_TYPE_PACKAGE) {
lpi_constraints_table = kcalloc(package->package.count,
sizeof(*lpi_constraints_table),
GFP_KERNEL);
@@ -135,12 +125,10 @@ static void lpi_device_get_constraints_amd(void)
for (k = 0; k < info_obj->package.count; ++k) {
union acpi_object *obj = &info_obj->package.elements[k];
- union acpi_object *obj_new;
list = &lpi_constraints_table[lpi_constraints_table_size];
list->min_dstate = -1;
- obj_new = &obj[k];
switch (k) {
case 0:
dev_info.enabled = obj->integer.value;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 65a3886f68c9..5f0472c18bcb 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
- return err;
+ goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 25e08e5f40bd..6eb4c7a904c5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
#endif
#endif /* !CONFIG_SRCU */
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
- if (dev == target)
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
struct device *con = link->consumer;
char *buf;
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
@@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_sup_dev;
@@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
@@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
}
@@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,
link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
- dev_set_name(&link->link_dev, "%s--%s",
- dev_name(supplier), dev_name(consumer));
+ dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+ dev_bus_name(supplier), dev_name(supplier),
+ dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(consumer);
put_device(supplier);
@@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
- return drv ? drv->name :
- (dev->bus ? dev->bus->name :
- (dev->class ? dev->class->name : ""));
+ return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
@@ -4414,6 +4432,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ * - primary --> secondary --> -ENODEV
+ * - primary --> NULL
+ * - secondary --> -ENODEV
+ * - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
@@ -4432,8 +4456,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
+ /* Set fn->secondary = NULL, so fn remains the primary fwnode */
if (!(parent && fn == parent->fwnode))
- fn->secondary = ERR_PTR(-ENODEV);
+ fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
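
The behavioural change in this hunk: when a primary fwnode is torn down while a secondary exists, the old primary keeps a NULL secondary pointer instead of ERR_PTR(-ENODEV), so it can serve as a primary again later. A toy userspace model of the pointer shuffle (types and names are stand-ins, not the kernel's):

#include <stdio.h>

struct fwnode {
	struct fwnode *secondary;
	const char *name;
};

static struct fwnode enodev = { .name = "ERR_PTR(-ENODEV)" };

int main(void)
{
	struct fwnode sec = { .secondary = &enodev, .name = "secondary" };
	struct fwnode pri = { .secondary = &sec, .name = "primary" };
	struct fwnode *dev_fwnode = &pri;

	/* set_primary_fwnode(dev, NULL) with a secondary present: the
	 * device drops to the secondary, and the old primary's link is
	 * cleared (NULL) instead of poisoned with ERR_PTR(-ENODEV). */
	dev_fwnode = pri.secondary;
	pri.secondary = NULL;

	printf("dev->fwnode is now %s\n", dev_fwnode->name);
	return 0;
}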
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 2f32f38a11ed..9179825ff646 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -371,13 +371,6 @@ static void driver_bound(struct device *dev)
device_pm_check_callbacks(dev);
/*
- * Reorder successfully probed devices to the end of the device list.
- * This ensures that suspend/resume order matches probe order, which
- * is usually what drivers rely on.
- */
- device_pm_move_to_tail(dev);
-
- /*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices
*/
@@ -619,6 +612,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
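
Freeing dev->dma_range_map on the probe error path plugs a leak, and clearing the pointer afterwards keeps any later unwind or re-probe from seeing a stale map. Sketch of the free-and-clear idiom with a stand-in structure:

#include <stdlib.h>

struct device_stub {
	void *dma_range_map;	/* stand-in for the kernel's map */
};

static void probe_failed(struct device_stub *dev)
{
	free(dev->dma_range_map);
	dev->dma_range_map = NULL;	/* safe to test or free again */
}

int main(void)
{
	struct device_stub dev = { .dma_range_map = malloc(16) };

	if (!dev.dma_range_map)
		return 1;
	probe_failed(&dev);
	probe_failed(&dev);	/* idempotent because of the NULL */
	return 0;
}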
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 95fd1549f87d..8456d8384ac8 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
return -ERANGE;
nvec = platform_irq_count(dev);
+ if (nvec < 0)
+ return nvec;
if (nvec < minvec)
return -ENOSPC;
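
platform_irq_count() can return a negative errno, and the old code compared that errno straight against minvec. A sketch of the corrected ordering (fake count function, EPROBE_DEFER value hard-coded for illustration):

#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel errno, defined here for the sketch */

static int fake_platform_irq_count(void)
{
	return -EPROBE_DEFER;	/* pretend the count is not known yet */
}

int main(void)
{
	int minvec = 2;
	int nvec = fake_platform_irq_count();

	if (nvec < 0) {			/* propagate the errno first... */
		printf("error %d\n", nvec);
		return 1;
	}
	if (nvec < minvec) {		/* ...only then check the minimum */
		puts("-ENOSPC");
		return 1;
	}
	printf("have %d vectors\n", nvec);
	return 0;
}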
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 8dfac7f3ed7a..ff2ee87987c7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
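
Both hunks guard against kasprintf() returning NULL, and the first also reuses an already-built debugfs name instead of leaking it when the map is re-initialized. Rough userspace equivalent, with GNU asprintf() standing in for kasprintf():

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *debugfs_name = NULL;

	if (!debugfs_name) {	/* build once, reuse on re-init */
		if (asprintf(&debugfs_name, "%s-%s", "dev0", "config") < 0)
			return 1;	/* bail rather than use NULL */
	}
	puts(debugfs_name);
	free(debugfs_name);
	return 0;
}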
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 262326973ee0..583b671b1d2d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -445,6 +445,7 @@ config BLK_DEV_RBD
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
+ select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
index 4b6d3d816d1f..2ff05a0d2646 100644
--- a/drivers/block/rnbd/Kconfig
+++ b/drivers/block/rnbd/Kconfig
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
tristate "RDMA Network Block Device driver client"
depends on INFINIBAND_RTRS_CLIENT
select BLK_DEV_RNBD
+ select SG_POOL
help
RNBD client is a network block device driver using rdma transport.
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
index 1773c0aa0bd4..080f58a5400a 100644
--- a/drivers/block/rnbd/README
+++ b/drivers/block/rnbd/README
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
Milind Dumbare <Milind.dumbare@gmail.com>
Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 96e3f9fe8241..45a470076652 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -375,12 +375,19 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
init_waitqueue_head(&iu->comp.wait);
iu->comp.errno = INT_MAX;
+ if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+ rnbd_put_permit(sess, permit);
+ kfree(iu);
+ return NULL;
+ }
+
return iu;
}
static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
if (atomic_dec_and_test(&iu->refcount)) {
+ sg_free_table(&iu->sgt);
rnbd_put_permit(sess, iu->permit);
kfree(iu);
}
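
With the allocation moved into rnbd_get_iu(), the sg table's lifetime is tied to the iu refcount: allocated once at get time, released on the final put, with a full unwind if the allocation fails. Sketch of the pairing, using a plain counter in place of the kernel's atomics:

#include <stdlib.h>

struct iu {
	int refcount;
	void *sgt;	/* stand-in for the sg table */
};

static struct iu *get_iu(void)
{
	struct iu *iu = calloc(1, sizeof(*iu));

	if (!iu)
		return NULL;
	iu->sgt = malloc(64);
	if (!iu->sgt) {		/* undo everything done so far */
		free(iu);
		return NULL;
	}
	iu->refcount = 1;
	return iu;
}

static void put_iu(struct iu *iu)
{
	if (--iu->refcount == 0) {
		free(iu->sgt);	/* freed exactly once, on the last put */
		free(iu);
	}
}

int main(void)
{
	struct iu *iu = get_iu();

	if (!iu)
		return 1;
	put_iu(iu);
	return 0;
}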
@@ -487,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
iu->buf = NULL;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
msg.device_id = cpu_to_le32(device_id);
@@ -502,7 +507,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -575,7 +579,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
iu->buf = rsp;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
@@ -594,7 +597,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -622,8 +624,6 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
iu->buf = rsp;
iu->sess = sess;
-
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
@@ -650,7 +650,6 @@ put_iu:
} else {
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -1698,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
*/
list_for_each_entry_safe(sess, sn, &sess_list, list) {
- WARN_ON(!rnbd_clt_get_sess(sess));
+ if (!rnbd_clt_get_sess(sess))
+ continue;
close_rtrs(sess);
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index b8e44331e494..a6a68d44f517 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -338,10 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
{
- mutex_lock(&sess_dev->sess->lock);
- rnbd_srv_destroy_dev_session_sysfs(sess_dev);
- mutex_unlock(&sess_dev->sess->lock);
+ struct rnbd_srv_session *sess = sess_dev->sess;
+
sess_dev->keep_id = true;
+ mutex_lock(&sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&sess->lock);
}
static int process_msg_close(struct rtrs_srv *rtrs,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5265975b3fba..e1c6798889f4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_granularity = info->discard_granularity ?:
+ info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
- int err;
- unsigned int discard_granularity;
- unsigned int discard_alignment;
-
info->feature_discard = 1;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
+ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-granularity",
+ 0);
+ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
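
The fallback relies on the GNU ?: extension, which yields the left-hand operand when it is non-zero; a missing discard-granularity key (read as 0) therefore falls back to the physical sector size. Tiny sketch (GCC extension, hypothetical values):

#include <stdio.h>

int main(void)
{
	unsigned int discard_granularity = 0;	/* key absent: read as 0 */
	unsigned int physical_sector_size = 512;
	unsigned int g = discard_granularity ?: physical_sector_size;

	printf("granularity = %u\n", g);	/* 512 */
	return 0;
}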
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index 845b6c43fef8..2344d560b144 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
ret = of_platform_default_populate(child, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index b8e6acdf932e..8af978bd0000 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -840,6 +840,15 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * The firmware interrogation above confirmed that this device has
+ * an endpoint, so a NULL lookup result here only means the endpoint
+ * has not yet been discovered by the fsl-mc bus. Differentiate that
+ * case from a hard failure by returning -EPROBE_DEFER.
+ */
+ if (!endpoint)
+ return ERR_PTR(-EPROBE_DEFER);
+
return endpoint;
}
EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
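
Returning ERR_PTR(-EPROBE_DEFER) lets callers distinguish "endpoint exists but is not discovered yet" from a plain failure. A userspace model of the ERR_PTR convention the hunk uses (EPROBE_DEFER defined locally, since it is a kernel-private errno):

#include <stdio.h>
#include <stdint.h>

#define EPROBE_DEFER 517	/* kernel-private errno, defined locally */

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static void *lookup_endpoint(int discovered)
{
	static int endpoint;	/* stand-in object */

	if (!discovered)
		return ERR_PTR(-EPROBE_DEFER);	/* exists, not probed yet */
	return &endpoint;
}

int main(void)
{
	void *ep = lookup_endpoint(0);

	if (IS_ERR(ep))
		printf("defer probe: %ld\n", PTR_ERR(ep));	/* -517 */
	return 0;
}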
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 3061896503f3..47d9ec3abd2f 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -6,8 +6,6 @@ config MXC_CLK
config MXC_CLK_SCU
tristate
- depends on ARCH_MXC
- depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
def_bool SOC_IMX1
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index eea69d498bd2..7aa7f4a9564f 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index d82d725ac231..b05901b24917 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
},
};
-static struct clk_branch gcc_camera_ahb_clk = {
- .halt_reg = 0xb008,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0xb008,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0xb008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_camera_hf_axi_clk = {
.halt_reg = 0xb020,
.halt_check = BRANCH_HALT,
@@ -2317,7 +2302,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
- [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
@@ -2519,11 +2503,12 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
/*
* Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
- * GCC_GPU_CFG_AHB_CLK
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
*/
regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index 6cb6617b8d88..ab594a0f0c40 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 37244a7e68c2..9cf249c344d9 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+ { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index a60aee1a1a29..65df9ef5b5bc 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
return len;
}
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- u32 qposinit;
-
- regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
- return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
-
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
-
- regmap_write(priv->regmap32, QPOSINIT, res);
-
- return len;
-}
-
static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)
@@ -302,11 +272,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
.write = ti_eqep_position_ceiling_write,
},
{
- .name = "floor",
- .read = ti_eqep_position_floor_read,
- .write = ti_eqep_position_floor_write,
- },
- {
.name = "enable",
.read = ti_eqep_position_enable_read,
.write = ti_eqep_position_enable_write,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6e23376548ce..be05e038d956 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
-static inline int32_t percent_fp(int percent)
-{
- return div_fp(percent, 100);
-}
-
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
-static inline int32_t percent_ext_fp(int percent)
-{
- return div_ext_fp(percent, 100);
-}
-
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -2653,12 +2643,13 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
unsigned long capacity)
{
struct cpudata *cpu = all_cpu_data[cpunum];
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
update_turbo_state();
- cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
- cpu->pstate.turbo_pstate;
+ cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -3086,7 +3077,6 @@ static int __init intel_pstate_init(void)
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
- intel_cpufreq.fast_switch = NULL;
intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
if (!default_driver)
default_driver = &intel_pstate;
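
The adjust_perf path now derives the cap from the cached HWP capabilities MSR rather than the generic pstate fields. Assuming the usual msr-index.h bit layout (highest perf in bits 7:0, guaranteed perf in bits 15:8), the unpacking looks like this (MSR value below is made up):

#include <stdio.h>
#include <stdint.h>

#define HWP_HIGHEST_PERF(cap)		(((cap) >> 0) & 0xff)
#define HWP_GUARANTEED_PERF(cap)	(((cap) >> 8) & 0xff)

int main(void)
{
	uint64_t hwp_cap = 0x121928;	/* made-up MSR contents */

	printf("highest %u guaranteed %u\n",
	       (unsigned int)HWP_HIGHEST_PERF(hwp_cap),
	       (unsigned int)HWP_GUARANTEED_PERF(hwp_cap));
	return 0;
}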
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0acc9e241cd7..b9ccb6a3dad9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index bbd51703e738..e535f28a8028 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
+ select CRYPTO_ENGINE
select CRYPTO_SHA1
select CRYPTO_MD5
select CRYPTO_SHA256
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index fabfaaccca87..fa56b45620c7 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
__le32 byte_cnt;
union {
__le32 src;
- dma_addr_t src_dma;
+ u32 src_dma;
};
union {
__le32 dst;
- dma_addr_t dst_dma;
+ u32 dst_dma;
};
__le32 next_dma;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e63684d4cd90..9ad6397aaa97 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
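
Unlinking from db_list moves from the dentry's d_release to a new f_op->release, so it happens per struct file rather than at dentry teardown. A loose userspace model of the two-stage release (list handling simplified, no locking shown):

#include <stdio.h>
#include <stdlib.h>

struct dmabuf {
	struct dmabuf *next;
};

static struct dmabuf *db_list;

static void file_release(struct dmabuf *b)	/* per struct file */
{
	struct dmabuf **p = &db_list;

	while (*p && *p != b)
		p = &(*p)->next;
	if (*p)
		*p = b->next;	/* unlink; the kernel holds db_list.lock */
}

static void dentry_release(struct dmabuf *b)	/* final teardown */
{
	free(b);
}

int main(void)
{
	struct dmabuf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->next = db_list;
	db_list = b;
	file_release(b);
	dentry_release(b);
	printf("list empty: %s\n", db_list ? "no" : "yes");
	return 0;
}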
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 3c4e34301172..364fc2f3e499 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
buffer->vaddr = NULL;
}
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
kfree(buffer);
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b971505b8715..08d71dafa001 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
if (desc->chunk) {
/* Create and add new element into the linked list */
- desc->chunks_alloc++;
- list_add_tail(&chunk->list, &desc->chunk->list);
if (!dw_edma_alloc_burst(chunk)) {
kfree(chunk);
return NULL;
}
+ desc->chunks_alloc++;
+ list_add_tail(&chunk->list, &desc->chunk->list);
} else {
/* List head */
chunk->burst = NULL;
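
The chunk is now linked into the list only after its burst allocation succeeds, so the error path can kfree() a never-published node without unlinking it first. Sketch of the publish-after-init pattern:

#include <stdlib.h>

struct chunk {
	struct chunk *next;
	void *burst;
};

static struct chunk *alloc_chunk(struct chunk **head)
{
	struct chunk *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->burst = malloc(32);
	if (!c->burst) {	/* not yet linked: a plain free is safe */
		free(c);
		return NULL;
	}
	c->next = *head;	/* publish only after full construction */
	*head = c;
	return c;
}

int main(void)
{
	struct chunk *head = NULL;

	if (!alloc_chunk(&head))
		return 1;
	free(head->burst);
	free(head);
	return 0;
}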
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 266423a2cabc..4dbb03c545e4 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -434,7 +434,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
- for (; i > 0; i--)
+ while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
@@ -1840,7 +1840,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
- for (; i > 0; i--)
+ while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}
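
Unwind-loop fix: after a failure at index i, entries 0..i-1 were registered, so cleanup must visit i-1 down to 0; the old for (; i > 0; i--) touched the failed index i and skipped 0. Demonstration of the corrected bounds:

#include <stdio.h>

int main(void)
{
	int i = 3;	/* registration of entry 3 just failed */

	while (--i >= 0)
		printf("unregister %d\n", i);	/* 2, 1, 0 */
	return 0;
}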
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f133ae8dece1..6ad8afbb95f2 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
err_free:
+ mtk_hsdma_hw_deinit(hsdma);
of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 584c931e807a..d29d01e730aa 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -350,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
ret = dma_async_device_register(ddev);
if (ret)
- return ret;
+ goto disable_xdmac;
ret = of_dma_controller_register(dev->of_node,
of_dma_simple_xlate, mdev);
@@ -363,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
unregister_dmac:
dma_async_device_unregister(ddev);
+disable_xdmac:
+ disable_xdmac(mdev);
return ret;
}
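
The probe error path grows a disable_xdmac label so a late registration failure undoes the earlier hardware enable. Generic sketch of the goto-unwind shape (step names are placeholders):

#include <stdio.h>

static int step_a(void) { return 0; }
static void undo_a(void) { puts("undo a"); }
static int step_b(void) { return -1; }	/* pretend this fails */

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;
	ret = step_b();
	if (ret)
		goto err_a;	/* a bare return here would leak step_a */
	return 0;

err_a:
	undo_a();
	return ret;
}

int main(void)
{
	printf("probe = %d\n", probe());
	return 0;
}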
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d5773d474d8f..88579857ca1d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
GFP_NOWAIT);
if (!async_desc)
- goto err_out;
+ return NULL;
if (flags & DMA_PREP_FENCE)
async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
}
return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
- kfree(async_desc);
- return NULL;
}
/**
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index d2334f535de2..1a0bf6b0567a 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1416,7 +1416,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
len = 1 << bit;
ring->alloc_size = (len + (len - 1));
dev_dbg(gpii->gpi_dev->dev,
- "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+ "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
elements, el_size, (elements * el_size), len,
ring->alloc_size);
@@ -1424,7 +1424,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
ring->alloc_size,
&ring->dma_handle, GFP_KERNEL);
if (!ring->pre_aligned) {
- dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n",
+ dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
ring->alloc_size);
return -ENOMEM;
}
@@ -1444,8 +1444,8 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
smp_wmb();
dev_dbg(gpii->gpi_dev->dev,
- "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
- ring->dma_handle, ring->phys_addr, ring->len,
+ "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+ &ring->dma_handle, &ring->phys_addr, ring->len,
ring->el_size, ring->elements);
return 0;
@@ -1948,7 +1948,7 @@ static int gpi_ch_init(struct gchan *gchan)
return ret;
error_start_chan:
- for (i = i - 1; i >= 0; i++) {
+ for (i = i - 1; i >= 0; i--) {
gpi_stop_chan(&gpii->gchan[i]);
gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
}
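
Format fixes in this file: size_t arguments take %zu, and dma_addr_t/phys_addr_t are printed through the kernel's %pad/%pa, which take the variable's address since the type's width is config-dependent. The %zu half in plain C:

#include <stdio.h>

int main(void)
{
	size_t alloc_size = 4096;

	/* %lu or %llu would be the wrong width on some ABIs;
	 * %zu always matches size_t */
	printf("alloc_size:%zu\n", alloc_size);
	return 0;
}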
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index e4637ec786d3..36ba8b43e78d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,7 +199,7 @@
#define STM32_MDMA_MAX_CHANNELS 63
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
-#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
enum stm32_mdma_trigger_mode {
STM32_MDMA_BUFFER,
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 87157cbae1b8..298460438bb4 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4698,9 +4698,9 @@ static int pktdma_setup_resources(struct udma_dev *ud)
ud->tchan_tpl.levels = 1;
}
- ud->tchan_tpl.levels = ud->tchan_tpl.levels;
- ud->tchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
- ud->tchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
sizeof(unsigned long), GFP_KERNEL);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 22faea653ea8..79777550a6ff 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
has_dre = false;
if (!has_dre)
- xdev->common.copy_align = fls(width - 1);
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
+ int ret, i;
+ u32 nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
/* Register the DMA engine with the core */
- dma_async_device_register(&xdev->common);
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
xdev);
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 1d2e5b85d7ca..c027d99f2a59 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -13,6 +13,7 @@ config IMX_DSP
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
+ select SOC_BUS
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c70f46e80a3b..dea65d85594f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -521,7 +521,8 @@ config GPIO_SAMA5D2_PIOBU
config GPIO_SIFIVE
bool "SiFive GPIO support"
- depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ depends on OF_GPIO
+ select IRQ_DOMAIN_HIERARCHY
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select REGMAP_MMIO
@@ -597,6 +598,8 @@ config GPIO_TEGRA
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 672681a976f5..a912a8fed197 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -676,20 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
else
state->duty_cycle = 1;
+ val = (unsigned long long) u; /* on duration */
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
- val = (unsigned long long) u * NSEC_PER_SEC;
+ val += (unsigned long long) u; /* period = on + off duration */
+ val *= NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
- if (val < state->duty_cycle) {
+ if (val > UINT_MAX)
+ state->period = UINT_MAX;
+ else if (val)
+ state->period = val;
+ else
state->period = 1;
- } else {
- val -= state->duty_cycle;
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
- else
- state->period = 1;
- }
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
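
The reworked math sums the on and off durations before scaling, so the reported period is the full cycle rather than a value derived from the off time with the duty cycle subtracted. Worked example with hypothetical register and clock values:

#include <stdio.h>

int main(void)
{
	unsigned long long on = 25000, off = 25000;	/* counter ticks */
	unsigned long long clk_rate = 25000000;		/* 25 MHz */
	unsigned long long val = (on + off) * 1000000000ULL / clk_rate;
	unsigned int period;

	if (val > 0xffffffffULL)
		period = 0xffffffff;	/* clamp to UINT_MAX */
	else if (val)
		period = val;
	else
		period = 1;
	printf("period = %u ns\n", period);	/* 2000000 */
	return 0;
}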
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 12b679ca552c..1a7b51163528 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1979,6 +1979,21 @@ struct gpio_chardev_data {
#endif
};
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ struct gpio_device *gdev = cdev->gdev;
+ struct gpiochip_info chipinfo;
+
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
+ strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+ strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+ chipinfo.lines = gdev->ngpio;
+ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+ return -EFAULT;
+ return 0;
+}
+
#ifdef CONFIG_GPIO_CDEV_V1
/*
* returns 0 if the versions match, else the previously selected ABI version
@@ -1993,6 +2008,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
return abiv;
}
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+ bool watch)
+{
+ struct gpio_desc *desc;
+ struct gpioline_info lineinfo;
+ struct gpio_v2_line_info lineinfo_v2;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ /* this doubles as a range check on line_offset */
+ desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (watch) {
+ if (lineinfo_ensure_abi_version(cdev, 1))
+ return -EPERM;
+
+ if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+ return -EBUSY;
+ }
+
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ if (watch)
+ clear_bit(lineinfo.line_offset, cdev->watched_lines);
+ return -EFAULT;
+ }
+
+ return 0;
+}
#endif
static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
@@ -2030,6 +2080,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
return 0;
}
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ __u32 offset;
+
+ if (copy_from_user(&offset, ip, sizeof(offset)))
+ return -EFAULT;
+
+ if (offset >= cdev->gdev->ngpio)
+ return -EINVAL;
+
+ if (!test_and_clear_bit(offset, cdev->watched_lines))
+ return -EBUSY;
+
+ return 0;
+}
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
@@ -2037,80 +2103,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
- struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
- __u32 offset;
/* We fail any subsequent ioctl():s when the chip is gone */
- if (!gc)
+ if (!gdev->chip)
return -ENODEV;
/* Fill in the struct and pass to userspace */
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
- struct gpiochip_info chipinfo;
-
- memset(&chipinfo, 0, sizeof(chipinfo));
-
- strscpy(chipinfo.name, dev_name(&gdev->dev),
- sizeof(chipinfo.name));
- strscpy(chipinfo.label, gdev->label,
- sizeof(chipinfo.label));
- chipinfo.lines = gdev->ngpio;
- if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
- return -EFAULT;
- return 0;
+ return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
- return -EFAULT;
- return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
- } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- if (lineinfo_ensure_abi_version(cdev, 1))
- return -EPERM;
-
- if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
- return -EBUSY;
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
- clear_bit(lineinfo.line_offset, cdev->watched_lines);
- return -EFAULT;
- }
-
- return 0;
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ return lineinfo_get_v1(cdev, ip,
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
@@ -2119,16 +2129,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
return linereq_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
- if (copy_from_user(&offset, ip, sizeof(offset)))
- return -EFAULT;
-
- if (offset >= cdev->gdev->ngpio)
- return -EINVAL;
-
- if (!test_and_clear_bit(offset, cdev->watched_lines))
- return -EBUSY;
-
- return 0;
+ return lineinfo_unwatch(cdev, ip);
}
return -EINVAL;
}
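
The ioctl handler is reduced to a dispatcher over per-command helpers, with the v1 lineinfo and lineinfo-watch paths folded into one helper taking a watch flag. Skeleton of the resulting shape (command values and helper bodies are stand-ins):

#include <errno.h>
#include <stdio.h>

enum cmd {
	GET_CHIPINFO,
	GET_LINEINFO,
	GET_LINEINFO_WATCH,
	GET_LINEINFO_UNWATCH,
};

static int chipinfo_get(void) { return 0; }
static int lineinfo_get_v1(int watch) { (void)watch; return 0; }
static int lineinfo_unwatch(void) { return 0; }

static long gpio_ioctl(enum cmd cmd)
{
	switch (cmd) {
	case GET_CHIPINFO:
		return chipinfo_get();
	case GET_LINEINFO:
	case GET_LINEINFO_WATCH:
		return lineinfo_get_v1(cmd == GET_LINEINFO_WATCH);
	case GET_LINEINFO_UNWATCH:
		return lineinfo_unwatch();
	}
	return -EINVAL;
}

int main(void)
{
	printf("%ld\n", gpio_ioctl(GET_LINEINFO));
	return 0;
}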
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index b02cc2abd3b6..b78a634cca24 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1489,6 +1489,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
type = IRQ_TYPE_NONE;
}
+ if (gc->to_irq)
+ chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
gc->to_irq = gpiochip_to_irq;
gc->irq.default_type = type;
gc->irq.lock_key = lock_key;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 306077884a67..6107ac91db25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
union igp_info {
struct atom_integrated_system_info_v1_11 v11;
struct atom_integrated_system_info_v1_12 v12;
+ struct atom_integrated_system_info_v2_1 v21;
};
union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (adev->flags & AMD_IS_APU) {
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v11.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v11.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
- case 12:
- mem_channel_number = igp_info->v12.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v12.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number = igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v21.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
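
VRAM info parsing now dispatches on the major revision (frev) first and the minor (crev) second, rejecting unknown pairs. Minimal sketch of the nested-switch shape (channel counts are invented):

#include <errno.h>
#include <stdio.h>

static int parse_igp_info(int frev, int crev, unsigned int *vram_width)
{
	switch (frev) {
	case 1:
		switch (crev) {
		case 11:
		case 12:
			*vram_width = 2 * 64;	/* invented channel count */
			return 0;
		default:
			return -EINVAL;
		}
	case 2:
		switch (crev) {
		case 1:
		case 2:
			*vram_width = 4 * 64;	/* invented channel count */
			return 0;
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}

int main(void)
{
	unsigned int width = 0;

	if (!parse_igp_info(2, 1, &width))
		printf("vram_width = %u\n", width);
	return 0;
}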
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1cb7d73f7317..cab1ebaf6d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -2548,11 +2547,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_device_fini(adev);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -3034,7 +3033,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#endif
default:
if (amdgpu_dc > 0)
- DRM_INFO("Display Core has been requested via kernel parameter "
+ DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 72efd579ec5e..7169fb5e3d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 523d22db094b..347fec669424 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -563,7 +563,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1315,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp_context.hdcp_initialized)
- return 0;
+ if (!psp->hdcp_context.hdcp_initialized) {
+ if (psp->hdcp_context.hdcp_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1324,6 +1328,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1467,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm_context.dtm_initialized)
- return 0;
+ if (!psp->dtm_context.dtm_initialized) {
+ if (psp->dtm_context.dtm_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_dtm_unload(psp);
if (ret)
@@ -1471,6 +1480,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
@@ -2589,11 +2599,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd_fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
- psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
- psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c136bd449744..82e952696d24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int i = 0;
- int ret = 0;
+ int ret = 0, status;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- ret = amdgpu_vram_mgr_query_page_status(
+ status = amdgpu_vram_mgr_query_page_status(
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
data->bps[i].retired_page);
- if (ret == -EBUSY)
+ if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (ret == -ENOENT)
+ else if (status == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 1dd040166c63..19d9aa76cfbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -30,6 +30,7 @@
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -62,7 +63,8 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
- (adev->asic_type == CHIP_ARCTURUS))
+ (adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID))
return true;
return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+ case CHIP_SIENNA_CICHLID:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+ break;
+
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ba1086784525..346963e3cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -99,6 +99,10 @@
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441
#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1
#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261
@@ -115,6 +119,8 @@
#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_Vangogh 0x2440
#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
+#define mmGCR_GENERAL_CNTL_Vangogh 0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0
#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
@@ -160,6 +166,9 @@
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3237,7 +3246,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3324,6 +3333,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -7192,6 +7202,9 @@ static int gfx_v10_0_hw_init(void *handle)
if (adev->asic_type == CHIP_SIENNA_CICHLID)
gfx_v10_3_program_pbb_mode(adev);
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ gfx_v10_3_set_power_brake_sequence(adev);
+
return r;
}
@@ -7377,8 +7390,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+ break;
+ default:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ break;
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
@@ -9169,6 +9190,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+ (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+ (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+ (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+ (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+ (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+ (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+ (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+ (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+ (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index b72c8e4ca36b..1961745e89c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data, def1, data1;
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
- data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
} else {
- data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}
@@ -525,17 +523,44 @@ static void
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
- data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
- else
- data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ uint32_t def, data, def1, data1, def2, data2;
+
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ } else {
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ }
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+ if (def1 != data1)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+ if (def2 != data2)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}
static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
@@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
- int data, data1;
+ int data, data1, data2, data3;
if (amdgpu_sriov_vf(adev))
*flags = 0;
- data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
- data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
/* AMD_CG_SUPPORT_MC_MGCG */
- if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
- !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
- *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+ && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ }
/* AMD_CG_SUPPORT_MC_LS */
- if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+ && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+ && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
*flags |= AMD_CG_SUPPORT_MC_LS;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index d65a5339d354..3ba7bdfde65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
- GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8a23636ecc27..0b3516c4eefb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1239,7 +1239,8 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- if (adev->pdev->device == 0x1636)
+ if ((adev->pdev->device == 0x1636) ||
+ (adev->pdev->device == 0x164c))
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 8cac497c2c45..a5640a6138cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
(struct crat_subtype_iolink *)sub_type_hdr);
if (ret < 0)
return ret;
- crat_table->length += (sub_type_hdr->length * entries);
- crat_table->total_entries += entries;
- sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
- sub_type_hdr->length * entries);
+ if (entries) {
+ crat_table->length += (sub_type_hdr->length * entries);
+ crat_table->total_entries += entries;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length * entries);
+ }
#else
pr_info("IO link not available for non x86 platforms\n");
#endif
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 797b5d4b43e5..e509a175ed17 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 519080e9a233..c6da89df055d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -939,41 +939,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
-{
- dm->crc_win_x_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_START", 0, U16_MAX);
- if (!dm->crc_win_x_start_property)
- return -ENOMEM;
-
- dm->crc_win_y_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_START", 0, U16_MAX);
- if (!dm->crc_win_y_start_property)
- return -ENOMEM;
-
- dm->crc_win_x_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_END", 0, U16_MAX);
- if (!dm->crc_win_x_end_property)
- return -ENOMEM;
-
- dm->crc_win_y_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_END", 0, U16_MAX);
- if (!dm->crc_win_y_end_property)
- return -ENOMEM;
-
- return 0;
-}
-#endif
-
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1121,10 +1086,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
#endif
-#ifdef CONFIG_DEBUG_FS
- if (create_crtc_crc_properties(&adev->dm))
- DRM_ERROR("amdgpu: failed to create crc property.\n");
-#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -2386,8 +2347,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
- aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
- drm_connector_list_update(connector);
+ drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -5334,64 +5294,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
- state->crc_window = cur->crc_window;
-#endif
+
/* TODO Duplicate dc_stream after stream object is flattened */
return &state->base;
}
-#ifdef CONFIG_DEBUG_FS
-static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_new_state =
- to_dm_crtc_state(crtc_state);
-
- if (property == adev->dm.crc_win_x_start_property)
- dm_new_state->crc_window.x_start = val;
- else if (property == adev->dm.crc_win_y_start_property)
- dm_new_state->crc_window.y_start = val;
- else if (property == adev->dm.crc_win_x_end_property)
- dm_new_state->crc_window.x_end = val;
- else if (property == adev->dm.crc_win_y_end_property)
- dm_new_state->crc_window.y_end = val;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
- const struct drm_crtc_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_state =
- to_dm_crtc_state(state);
-
- if (property == adev->dm.crc_win_x_start_property)
- *val = dm_state->crc_window.x_start;
- else if (property == adev->dm.crc_win_y_start_property)
- *val = dm_state->crc_window.y_start;
- else if (property == adev->dm.crc_win_x_end_property)
- *val = dm_state->crc_window.x_end;
- else if (property == adev->dm.crc_win_y_end_property)
- *val = dm_state->crc_window.y_end;
- else
- return -EINVAL;
-
- return 0;
-}
-#endif
-
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
@@ -5458,10 +5366,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
- .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
- .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
};
static enum drm_connector_status
@@ -6663,25 +6567,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
- struct amdgpu_crtc *acrtc)
-{
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_end_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_end_property,
- 0);
-}
-#endif
-
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t crtc_index)
@@ -6729,9 +6614,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
- attach_crtc_crc_properties(dm, acrtc);
-#endif
+
return 0;
fail:
@@ -8368,7 +8251,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool configure_crc = false;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8378,30 +8260,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
- }
+
#ifdef CONFIG_DEBUG_FS
- if (new_crtc_state->active &&
- amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
- if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
- if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
- configure_crc = true;
- } else {
- if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
- configure_crc = true;
- }
- if (configure_crc)
+ if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
amdgpu_dm_crtc_configure_crc_source(
- crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
- }
+ crtc, dm_new_crtc_state,
+ dm_new_crtc_state->crc_src);
+ }
#endif
+ }
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2ee6edb3df93..1182dafcef02 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -336,32 +336,6 @@ struct amdgpu_display_manager {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#ifdef CONFIG_DEBUG_FS
- /**
- * @crc_win_x_start_property:
- *
- * X start of the crc calculation window
- */
- struct drm_property *crc_win_x_start_property;
- /**
- * @crc_win_y_start_property:
- *
- * Y start of the crc calculation window
- */
- struct drm_property *crc_win_y_start_property;
- /**
- * @crc_win_x_end_property:
- *
- * X end of the crc calculation window
- */
- struct drm_property *crc_win_x_end_property;
- /**
- * @crc_win_y_end_property:
- *
- * Y end of the crc calculation window
- */
- struct drm_property *crc_win_y_end_property;
-#endif
/**
* @mst_encoders:
*
@@ -448,15 +422,6 @@ struct dm_plane_state {
struct dc_plane_state *dc_state;
};
-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
- uint16_t x_start;
- uint16_t y_start;
- uint16_t x_end;
- uint16_t y_end;
- };
-#endif
-
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
@@ -479,9 +444,6 @@ struct dm_crtc_state {
struct dc_info_packet vrr_infopacket;
int abm_level;
-#ifdef CONFIG_DEBUG_FS
- struct crc_rec crc_window;
-#endif
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 7b886a779a8c..66cb8730586b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
-static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
-{
- dm_crtc_state->crc_window.x_start = 0;
- dm_crtc_state->crc_window.y_start = 0;
- dm_crtc_state->crc_window.x_end = 0;
- dm_crtc_state->crc_window.y_end = 0;
-}
-
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
- bool ret = true;
-
- if ((dm_crtc_state->crc_window.x_start != 0) ||
- (dm_crtc_state->crc_window.y_start != 0) ||
- (dm_crtc_state->crc_window.x_end != 0) ||
- (dm_crtc_state->crc_window.y_end != 0))
- ret = false;
-
- return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state)
-{
- bool ret = false;
-
- if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
- (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
- (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
- (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
- ret = true;
-
- return ret;
-}
-
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
int ret = 0;
- struct crc_params *crc_window = NULL, tmp_window;
/* Configuration will be deferred to stream enable. */
if (!stream_state)
@@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
/* Enable CRTC CRC generation if necessary. */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
- if (!enable)
- amdgpu_dm_set_crc_window_default(dm_crtc_state);
-
- if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
- crc_window = &tmp_window;
-
- tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
- tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
- }
-
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, crc_window, enable, enable)) {
+ stream_state, NULL, enable, enable)) {
ret = -EINVAL;
goto unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 0235bfb246e5..f7d731797d3f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -47,9 +47,6 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state);
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 64f515d74410..f3c00f479e1c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d59b380e7b7f..ff96bee57bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 5b466f440d67..ab98c259ef69 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
+ bool p_state_change_support;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
@@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
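+ /* With no displays active, allowing P-state changes is always safe */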
+ p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e1071b2181f..f4a2088ab179 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2487,9 +2487,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
- struct dc *dc = link->ctx->dc;
+ struct dc *dc = NULL;
struct abm *abm = NULL;
+ if (!link || !link->ctx)
+ return NULL;
+
+ dc = link->ctx->dc;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 2fc12239b22c..f95bade59624 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;
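+ /* Fail fast if the requested bandwidth exceeds the verified link capability */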
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
@@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
@@ -3992,7 +3995,7 @@ bool dc_link_dp_set_test_pattern(
unsigned int cust_pattern_size)
{
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = &pipes[0];
+ struct pipe_ctx *pipe_ctx = NULL;
unsigned int lane;
unsigned int i;
unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4002,12 +4005,18 @@ bool dc_link_dp_set_test_pattern(
memset(&training_pattern, 0, sizeof(training_pattern));
for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
}
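+ /* No enabled pipe drives this link, so there is nothing to program */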
+ if (pipe_ctx == NULL)
+ return false;
+
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 733e6e6e43bd..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index cfc130e2d6fd..017b67b830e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -647,8 +647,13 @@ static void power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, plane_id, true);
- hws->funcs.hubp_pg_control(hws, plane_id, true);
+
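+ /* The power-gating hooks are optional and left unimplemented on some ASIC variants */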
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, plane_id, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, dpp->inst, false);
- hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 100ce0e28fd5..b096011acb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- uint32_t val = 0;
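+ /* 0xf denotes an unused/disabled mux, avoiding a false match with MPCC0 */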
+ uint32_t val = 0xf;
if (opp_id < MAX_OPP && REG(MUX[opp_id]))
REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index bdc37831535e..90e912fef2b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
.disable_stereo_support = true,
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
-#if defined(CONFIG_ARM64)
- /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
- DC_FP_START();
- dcn10_resource_construct_fp(dc);
- DC_FP_END();
-#else
/* Other architectures we build for compile this with soft-float */
dcn10_resource_construct_fp(dc);
-#endif
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 624cb1341ef1..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index cb822df21b7c..480d928cb1ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e04ecf0fc0db..5ed18cac57e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
* if this primary pipe has a bottom pipe in prev. state
* and if the bottom pipe is still available (which it should be),
* pick that pipe as secondary
- * Same logic applies for ODM pipes. Since mpo is not allowed with odm
- * check in else case.
+ * Same logic applies for ODM pipes.
*/
if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
@@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
- } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ }
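+ /* Also consider the previous state's ODM pipe when the bottom-pipe path yielded no secondary pipe */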
+ if (secondary_pipe == NULL &&
+ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 1ee5fc03b7b3..bb8c95141082 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 1c88d2edd381..b000b43a820d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
+ .dram_clock_change_latency_us = 11.72,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 248c2711aace..c20331eb62e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -41,11 +41,6 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 2fd5d34e4ba6..3ca7d911d25c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -21,10 +21,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4825c5c1c6ed..35f5bf08ae96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
index 36e44e1b07fa..8d4924b7dc22 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
@@ -20,10 +20,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index a02a33dcd70b..6bb7f2905821 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 860e72a51534..80170f9721ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
- mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
- mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
- mode_lib->vba.MinTTUVBlank[k] += 25;
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index f2624a1156e5..8d31eb75c6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 95cb56929e79..126c2f3a4dd3 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -55,10 +55,6 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index e57e64bbacdc..88322781e447 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- smu10_data->gfx_actual_soft_min_freq,
+ clock,
NULL);
}
return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
-
+ /* fine-grain tuning is disabled by default */
+ data->fine_grain_enabled = 0;
return result;
}
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+ uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
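+ /* Leaving manual mode: disable fine-grain tuning and restore the SMU-reported limits */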
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
+ data->fine_grain_enabled = 1;
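+ /* Intentional fall-through: manual mode needs no further clock programming here */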
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
+ uint32_t min_freq, max_freq = 0;
+ uint32_t ret = 0;
switch (type) {
case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
- (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
- size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- uint32_t min_freq, max_freq = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ uint32_t min_freq, max_freq = 0;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
- if (size != 2) {
- pr_err("Input parameter number not correct\n");
+ if (!smu10_data->fine_grain_enabled) {
+ pr_err("Fine grain not started\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
- if (input[0] == 0)
- smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
- else if (input[0] == 1)
- smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
- else
+ if (size != 2) {
+ pr_err("Input parameter number not correct\n");
return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (input[1] < min_freq) {
+ pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], min_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_min_freq = input[1];
+ } else if (input[0] == 1) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (input[1] > max_freq) {
+ pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+ smu10_data->gfx_actual_soft_min_freq = min_freq;
+ smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+ pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ smu10_data->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
}
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 6c9b5f060902..808e0ecbe1f0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_actual_soft_max_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit; /* in 10 kHz */
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
bool need_min_deep_sleep_dcefclk;
uint32_t deep_sleep_dcefclk;
uint32_t num_active_display;
+
+ bool fine_grain_enabled;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9608745d732f..12b36eb0ff6a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2372,7 +2372,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
{
int i;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = 1;
req->I2CSpeed = 2;
req->SlaveAddress = address;
req->NumCmds = numbytes;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 8cb4fcee9a2c..5c1482d4ca43 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -252,7 +252,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower;
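+ /* CurrentSocketPower is in mW; report a 24.8 fixed-point value in W */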
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index dc75db8af371..9a9697038016 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
+ case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;
@@ -1120,7 +1121,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+ return 0;
}
static const struct pptable_funcs renoir_ppt_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 522d55004655..06abf2a7ce9e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ba1507036f26..4a8cbec832bc 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3021,7 +3021,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 02ca22e90290..0b232a73c1b7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
if (gbo->vmap_use_count > 0)
goto out;
- ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
- if (ret)
- return ret;
+ /*
+ * VRAM helpers unmap the BO only on demand, so a previous
+ * page mapping might still be around. Only vmap if there is
+ * no mapping present.
+ */
+ if (dma_buf_map_is_null(&gbo->map)) {
+ ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+ if (ret)
+ return ret;
+ }
out:
++gbo->vmap_use_count;
@@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
return;
ttm_bo_vunmap(bo, &gbo->map);
+ dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index e6231947f987..a0cb746bcb0a 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -1163,7 +1163,14 @@ retry:
if (ret)
goto out;
- if (old_fb->format != fb->format) {
+ /*
+ * Only check the FOURCC format code, excluding modifiers. This is
+ * enough for all legacy drivers. Atomic drivers have their own
+ * checks in their ->atomic_check implementation, which will
+ * return -EINVAL if any hw or driver constraint is violated due
+ * to modifier changes.
+ */
+ if (old_fb->format->format != fb->format->format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 6e74e6745eca..349146049849 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- drm_syncobj_put(syncobj);
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
if (!ret)
- return 0;
+ goto out;
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}
if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
- return ret;
+ goto out;
memset(&wait, 0, sizeof(wait));
wait.task = current;
@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);
+out:
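+ /* Drop the syncobj reference on every return path */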
+ drm_syncobj_put(syncobj);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e5574e506a5c..6d9e81ea67f4 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
+ i915_mitigations.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a9439b415603..b3533a32f8ba 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1616,10 +1616,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
get_dsi_io_power_domains(i915,
enc_to_intel_dsi(encoder));
-
- if (crtc_state->dsc.compression_enable)
- intel_display_power_get(i915,
- intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 92940a0c5ef8..d5ace48b1ace 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3725,7 +3725,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce82d654d0f2..34d78c654df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1436,6 +1436,9 @@ struct intel_dp {
bool ycbcr_444_to_420;
} dfp;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* Display stream compression testing */
bool force_dsc_en;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2165398d2c7c..09123e8625c4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- cpu_latency_qos_update_request(&i915->pm_qos, 0);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
intel_dp_check_edp(intel_dp);
@@ -1622,7 +1622,7 @@ done:
ret = recv_bytes;
out:
- cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
kfree(intel_dp->aux.name);
}
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
@@ -4010,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 tmp;
@@ -4029,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
enableddisabled(intel_dp->has_hdmi_sink));
- tmp = intel_dp->dfp.ycbcr_444_to_420 ?
- DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+ tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4084,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index b871a09b6901..05f7ddf7a795 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b2a4bbcfdcd2..b9d8825e2bb1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
if (content_protection_type_changed) {
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_connector_get(&connector->base);
schedule_work(&hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
@@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
desired_and_not_enabled =
hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
mutex_unlock(&hdcp->mutex);
+ /*
+ * If HDCP is already ENABLED while the CP property is still DESIRED,
+ * schedule prop_work so the correct CP property is reported to user space.
+ */
+ if (!desired_and_not_enabled && !content_protection_type_changed) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
}
if (desired_and_not_enabled || content_protection_type_changed)
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 9f23bac0d792..d64fce1a17cb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
val = pch_get_backlight(connector);
else
val = lpt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
if (cpu_mode) {
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
- lpt_set_backlight(connector->base.state, panel->backlight.level);
+ lpt_set_backlight(connector->base.state, val);
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d52f9c177908..f94025ec603a 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_prepare(encoder, pipe_config);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
- /* Deassert reset */
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ /*
+ * Give the panel time to power on and then deassert its reset.
+ * Depending on the VBT MIPI sequences version, the deassert-seq
+ * may already contain the necessary delay; intel_dsi_msleep() will
+ * skip the delay in that case. If there is no deassert-seq, an
+ * unconditional msleep is used to give the panel time to power on.
+ */
+ if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ } else {
+ msleep(intel_dsi->panel_on_delay);
+ }
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bcc80f428172..bd3046e5a934 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_pool;
}
+ memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index d93d85cd3027..94465374ca2f 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -7,8 +7,6 @@
#include "i915_drv.h"
#include "intel_gpu_commands.h"
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
@@ -34,38 +32,59 @@ struct batch_chunk {
};
struct batch_vals {
- u32 max_primitives;
- u32 max_urb_entries;
- u32 cmd_size;
- u32 state_size;
+ u32 max_threads;
u32 state_start;
- u32 batch_size;
+ u32 surface_start;
u32 surface_height;
u32 surface_width;
- u32 scratch_size;
- u32 max_size;
+ u32 size;
};
+static inline int num_primitives(const struct batch_vals *bv)
+{
+ /*
+ * We need to saturate the GPU with work in order to dispatch
+ * a shader on every HW thread, and clear the thread-local registers.
+ * In short, we have to dispatch work faster than the shaders can
+ * run in order to fill the EU and occupy each HW thread.
+ */
+ return bv->max_threads;
+}
+
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
if (IS_HASWELL(i915)) {
- bv->max_primitives = 280;
- bv->max_urb_entries = MAX_URB_ENTRIES;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1:
+ bv->max_threads = 70;
+ break;
+ case 2:
+ bv->max_threads = 140;
+ break;
+ case 3:
+ bv->max_threads = 280;
+ break;
+ }
bv->surface_height = 16 * 16;
bv->surface_width = 32 * 2 * 16;
} else {
- bv->max_primitives = 128;
- bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1: /* including vlv */
+ bv->max_threads = 36;
+ break;
+ case 2:
+ bv->max_threads = 128;
+ break;
+ }
bv->surface_height = 16 * 8;
bv->surface_width = 32 * 16;
}
- bv->cmd_size = bv->max_primitives * 4096;
- bv->state_size = STATE_SIZE;
- bv->state_start = bv->cmd_size;
- bv->batch_size = bv->cmd_size + bv->state_size;
- bv->scratch_size = bv->surface_height * bv->surface_width;
- bv->max_size = bv->batch_size + bv->scratch_size;
+ bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+ bv->surface_start = bv->state_start + SZ_4K;
+ bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
gen7_fill_binding_table(struct batch_chunk *state,
const struct batch_vals *bv)
{
- u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 surface_start =
+ gen7_fill_surface_state(state, bv->surface_start, bv);
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
@@ -214,9 +234,9 @@ static void
gen7_emit_state_base_address(struct batch_chunk *batch,
u32 surface_state_base)
{
- u32 *cs = batch_alloc_items(batch, 0, 12);
+ u32 *cs = batch_alloc_items(batch, 0, 10);
- *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ *cs++ = STATE_BASE_ADDRESS | (10 - 2);
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
- *cs++ = 0;
- *cs++ = 0;
batch_advance(batch, cs);
}
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
u32 urb_size, u32 curbe_size,
u32 mode)
{
- u32 urb_entries = bv->max_urb_entries;
- u32 threads = bv->max_primitives - 1;
+ u32 threads = bv->max_threads - 1;
u32 *cs = batch_alloc_items(batch, 32, 8);
*cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
*cs++ = 0;
/* number of threads & urb entries for GPGPU vs Media Mode */
- *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+ *cs++ = threads << 16 | 1 << 8 | mode << 2;
*cs++ = 0;
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
{
unsigned int x_offset = (media_object_index % 16) * 64;
unsigned int y_offset = (media_object_index / 16) * 16;
- unsigned int inline_data_size;
- unsigned int media_batch_size;
- unsigned int i;
+ unsigned int pkt = 6 + 3;
u32 *cs;
- inline_data_size = 112 * 8;
- media_batch_size = inline_data_size + 6;
-
- cs = batch_alloc_items(batch, 8, media_batch_size);
+ cs = batch_alloc_items(batch, 8, pkt);
- *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+ *cs++ = MEDIA_OBJECT | (pkt - 2);
/* interface descriptor offset */
*cs++ = 0;
@@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
*cs++ = 0;
/* inline */
- *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = y_offset << 16 | x_offset;
*cs++ = 0;
*cs++ = GT3_INLINE_DATA_DELAYS;
- for (i = 3; i < inline_data_size; i++)
- *cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
- u32 *cs = batch_alloc_items(batch, 0, 5);
+ u32 *cs = batch_alloc_items(batch, 0, 4);
- *cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
- PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 8);
+
+ /* ivb: Stall before STATE_CACHE_INVALIDATE */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
+ *cs++ = 0;
+
batch_advance(batch, cs);
}
@@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma,
const struct batch_vals *bv)
{
struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int desc_count = 64;
- const u32 urb_size = 112;
+ const unsigned int desc_count = 1;
+ const unsigned int urb_size = 1;
struct batch_chunk cmds, state;
- u32 interface_descriptor;
+ u32 descriptors;
unsigned int i;
- batch_init(&cmds, vma, start, 0, bv->cmd_size);
- batch_init(&state, vma, start, bv->state_start, bv->state_size);
+ batch_init(&cmds, vma, start, 0, bv->state_start);
+ batch_init(&state, vma, start, bv->state_start, SZ_4K);
- interface_descriptor =
- gen7_fill_interface_descriptor(&state, bv,
- IS_HASWELL(i915) ?
- &cb_kernel_hsw :
- &cb_kernel_ivb,
- desc_count);
- gen7_emit_pipeline_flush(&cmds);
+ descriptors = gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+
+ gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
- gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_invalidate(&cmds);
+
gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_state_base_address(&cmds, descriptors);
+ gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+ gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
- gen7_emit_interface_descriptor_load(&cmds,
- interface_descriptor,
- desc_count);
-
- for (i = 0; i < bv->max_primitives; i++)
+ for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
batch_get_defaults(engine->i915, &bv);
if (!vma)
- return bv.max_size;
+ return bv.size;
- GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+ GEM_BUG_ON(vma->obj->base.size < bv.size);
batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(batch))
return PTR_ERR(batch);
- emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+ emit_batch(vma, memset(batch, 0, bv.size), &bv);
i915_gem_object_flush_map(vma->obj);
__i915_gem_object_release_map(vma->obj);
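
Aside: batch_get_defaults() now lays the clear-residuals buffer out as [command stream | one 4K state page | scratch surface]. The arithmetic can be checked standalone (values assume Haswell GT2, i.e. max_threads = 140):

	#include <stdint.h>
	#include <stdio.h>

	#define SZ_1K 1024u
	#define SZ_4K 4096u
	/* generic round-up; the kernel macro assumes a power-of-2 step */
	#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		uint32_t max_threads = 140;		/* HSW GT2 */
		uint32_t surface_height = 16 * 16;
		uint32_t surface_width = 32 * 2 * 16;
		uint32_t state_start = round_up(SZ_1K + max_threads * 64, SZ_4K);
		uint32_t surface_start = state_start + SZ_4K;
		uint32_t size = surface_start + surface_height * surface_width;

		/* prints: state@12288 surface@16384 total=278528 */
		printf("state@%u surface@%u total=%u\n",
		       state_start, surface_start, size);
		return 0;
	}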
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index a24cc1ff08a0..0625cbb3b431 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
return true;
}
-static inline bool __request_completed(const struct i915_request *rq)
-{
- return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
bool release;
- if (!__request_completed(rq))
+ if (!__i915_request_is_complete(rq))
break;
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
- if (__request_completed(rq)) {
+ if (__i915_request_is_complete(rq)) {
if (__signal_request(rq) &&
llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work);
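
Aside: __i915_request_is_complete() (introduced in the i915_request.h hunk later in this diff) performs the same test as the deleted local helper; the i915_seqno_passed() underneath it is the usual wrap-safe comparison of free-running u32 seqnos:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* "a has passed b" despite u32 wrap-around: signed-distance test */
	static bool seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

	int main(void)
	{
		assert(seqno_passed(5, 3));
		assert(!seqno_passed(3, 5));
		assert(seqno_passed(2, 0xfffffffeu));	/* across the wrap */
		return 0;
	}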
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7614a3d24fca..26c7d0a50585 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3988,6 +3988,9 @@ err:
static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+ /* Called on error unwind, clear all flags to prevent further use */
+ memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a41b43f445b8..ecf3a6118a6d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -32,6 +32,7 @@
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
+#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
@@ -886,7 +887,8 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
- if (engine->wa_ctx.vma->private != ce) {
+ if (engine->wa_ctx.vma->private != ce &&
+ i915_mitigate_clear_residuals()) {
ret = clear_residuals(rq);
if (ret)
return ret;
@@ -1290,7 +1292,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
- if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
err = gen7_ctx_switch_bb_init(engine);
if (err)
goto err_ring_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7ea94d201fe6..8015964043eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
struct intel_timeline_cacheline *cl =
container_of(rcu, typeof(*cl), rcu);
+ /* Must wait until after all *rq->hwsp are complete before removing */
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
i915_active_fini(&cl->active);
kfree(cl);
}
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- i915_vma_put(cl->hwsp->vma);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
call_rcu(&cl->rcu, __rcu_cacheline_free);
}
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
return ERR_CAST(vaddr);
}
- i915_vma_get(hwsp->vma);
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 180c23e2e25e..602f1a0bc587 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a15f87539657..62a5b0dd2003 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
DDI_BUF_CTL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
}
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+ /* No hpd_invert set in vgpu vbt, need to clear invert mask */
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
PORTD_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
} else if (IS_BROXTON(i915)) {
- if (connected) {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ } else {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTA_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIC_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
- }
- } else {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ SFUSE_STRAP_DDIB_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
- ~SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTB_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDIC_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDIC_DETECTED;
}
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTC_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
}
- vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
- PORTB_HOTPLUG_STATUS_MASK;
- intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index e49944fde333..cbe5931906e0 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
- /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
- else if (!IS_BROXTON(dev_priv))
+ else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 93265951fdbb..b0899b665e85 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
- unsigned long x, n;
+ unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
+ remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- length = round_up(length,
+ remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- for (n = offset >> PAGE_SHIFT; length; n++) {
- int len = min(length, PAGE_SIZE - x);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
- length -= len;
+ remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
+ memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
-
- if (shadow_needs_clflush(shadow->obj))
- drm_clflush_virt_range(batch_end, 8);
}
- if (shadow_needs_clflush(shadow->obj)) {
- void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
- drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
- }
+ i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
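
Aside: the added memset32() pads the shadow batch from the copied length out to the end of the backing object. A zero dword decodes as MI_NOOP on these engines, so anything the parser did not copy executes harmlessly. memset32() itself is the kernel helper from <linux/string.h>; its semantics:

	#include <stddef.h>
	#include <stdint.h>

	/* fill n 32-bit words with the pattern v, as memset32() does */
	static void fill32(uint32_t *s, uint32_t v, size_t n)
	{
		while (n--)
			*s++ = v;
	}

	int main(void)
	{
		uint32_t shadow[8] = { 0x05000000u };	/* MI_BATCH_BUFFER_END */

		fill32(shadow + 1, 0, 8 - 1);	/* pad the tail with MI_NOOP */
		return 0;
	}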
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 320856b665a1..99eb0d7bbc44 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
-
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
@@ -1052,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
i915_gem_suspend(i915);
drm_kms_helper_poll_disable(&i915->drm);
@@ -1065,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a3ee4f9dc0a..632c713227dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,9 +891,6 @@ struct drm_i915_private {
bool display_irqs_enabled;
- /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
- struct pm_qos_request pm_qos;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
new file mode 100644
index 000000000000..84f12598d145
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+
+static unsigned long mitigations __read_mostly = ~0UL;
+
+enum {
+ CLEAR_RESIDUALS = 0,
+};
+
+static const char * const names[] = {
+ [CLEAR_RESIDUALS] = "residuals",
+};
+
+bool i915_mitigate_clear_residuals(void)
+{
+ return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
+}
+
+static int mitigations_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned long new = ~0UL;
+ char *str, *sep, *tok;
+ bool first = true;
+ int err = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
+
+ str = kstrdup(val, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ for (sep = str; (tok = strsep(&sep, ","));) {
+ bool enable = true;
+ int i;
+
+ /* Be tolerant of leading/trailing whitespace */
+ tok = strim(tok);
+
+ if (first) {
+ first = false;
+
+ if (!strcmp(tok, "auto"))
+ continue;
+
+ new = 0;
+ if (!strcmp(tok, "off"))
+ continue;
+ }
+
+ if (*tok == '!') {
+ enable = !enable;
+ tok++;
+ }
+
+ if (!strncmp(tok, "no", 2)) {
+ enable = !enable;
+ tok += 2;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if (!strcmp(tok, names[i])) {
+ if (enable)
+ new |= BIT(i);
+ else
+ new &= ~BIT(i);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(names)) {
+ pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
+ DRIVER_NAME, val, tok);
+ err = -EINVAL;
+ break;
+ }
+ }
+ kfree(str);
+ if (err)
+ return err;
+
+ WRITE_ONCE(mitigations, new);
+ return 0;
+}
+
+static int mitigations_get(char *buffer, const struct kernel_param *kp)
+{
+ unsigned long local = READ_ONCE(mitigations);
+ int count, i;
+ bool enable;
+
+ if (!local)
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
+
+ if (local & BIT(BITS_PER_LONG - 1)) {
+ count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
+ enable = false;
+ } else {
+ enable = true;
+ count = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if ((local & BIT(i)) != enable)
+ continue;
+
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
+ "%s%s,", enable ? "" : "!", names[i]);
+ }
+
+ buffer[count - 1] = '\n';
+ return count;
+}
+
+static const struct kernel_param_ops ops = {
+ .set = mitigations_set,
+ .get = mitigations_get,
+};
+
+module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
+MODULE_PARM_DESC(mitigations,
+"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
+"\n"
+" auto -- enables all mitigations required for the platform [default]\n"
+" off -- disables all mitigations\n"
+"\n"
+"Individual mitigations can be enabled by passing a comma-separated string,\n"
+"e.g. mitigations=residuals to enable only clearing residuals or\n"
+"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
+"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
+"disabling it.\n"
+"\n"
+"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
+" residuals -- clear all thread-local registers between contexts"
+);
diff --git a/drivers/gpu/drm/i915/i915_mitigations.h b/drivers/gpu/drm/i915/i915_mitigations.h
new file mode 100644
index 000000000000..1359d8135287
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MITIGATIONS_H__
+#define __I915_MITIGATIONS_H__
+
+#include <linux/types.h>
+
+bool i915_mitigate_clear_residuals(void);
+
+#endif /* __I915_MITIGATIONS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d76685ce0399..9856479b56d8 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
return val;
}
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ intel_wakeref_t wakeref;
- if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+ pmu->sample[__I915_SAMPLE_RC6].cur;
+ pmu->sleep_last = ktime_get();
+ }
+}
+static void park_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
@@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
return __get_rc6(gt);
}
+static void init_rc6(struct i915_pmu *pmu) { }
static void park_rc6(struct drm_i915_private *i915) {}
#endif
@@ -612,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
unsigned long flags;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -626,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
- if (pmu->enable_count[bit] == 0 &&
- config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
- pmu->sleep_last = ktime_get();
- }
-
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -673,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
@@ -1130,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
pmu->cpuhp.cpu = -1;
+ init_rc6(pmu);
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 620b6fab2c5c..92adfee30c7c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
static inline bool __i915_request_has_started(const struct i915_request *rq)
{
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}
/**
@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- /* Remember: started but may have since been preempted! */
- return __i915_request_has_started(rq);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ /* Remember: started but may have since been preempted! */
+ result = __i915_request_has_started(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
*/
static inline bool i915_request_is_running(const struct i915_request *rq)
{
+ bool result;
+
if (!i915_request_is_active(rq))
return false;
- return __i915_request_has_started(rq);
+ rcu_read_lock();
+ result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
return !list_empty(&rq->sched.link);
}
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ result = __i915_request_is_complete(rq);
+ rcu_read_unlock();
+
+ return result;
}
static inline void i915_request_mark_complete(struct i915_request *rq)
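
Aside: all three predicates above share one shape: the status page (HWSP) backing a request may be recycled once the request signals, so the raw seqno read happens only under rcu_read_lock() after re-checking the signaled bit, and a signaled request is reported complete without touching the HWSP at all. Compressed, with stubs so the shape compiles standalone:

	#include <stdbool.h>

	/* stubs standing in for rcu_read_lock()/rcu_read_unlock() */
	#define rcu_read_lock()		do { } while (0)
	#define rcu_read_unlock()	do { } while (0)

	struct req { const unsigned int *hwsp; unsigned int seqno; bool signaled; };

	static bool req_completed(const struct req *rq)
	{
		bool result = true;	/* signaled implies complete */

		if (rq->signaled)
			return true;

		rcu_read_lock();	/* keep the HWSP mapping alive */
		if (!rq->signaled)
			result = (int)(*rq->hwsp - rq->seqno) >= 0;
		rcu_read_unlock();

		return result;
	}

	int main(void)
	{
		unsigned int hwsp = 2;
		struct req rq = { &hwsp, 1, false };

		return req_completed(&rq) ? 0 : 1;
	}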
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 7e82c41a85f1..bdc989183c64 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
return gpu;
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 93da6683a866..4534633fe7cd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index c0be3a0f36b2..82bebb40234d 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 87c8b033ad1a..12e75ba360f9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
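
Aside: the a2xx/a3xx/a4xx hunks above gate the previously fatal no-IOMMU path behind this opt-in (msm.allow_vram_carveout=1 on the kernel command line). The control flow reduces to:

	#include <stdbool.h>
	#include <stdio.h>

	#define ENXIO 6

	static int gpu_init(bool have_mmu, bool allow_vram_carveout)
	{
		if (!have_mmu) {
			fprintf(stderr, "No memory protection without MMU\n");
			if (!allow_vram_carveout)
				return -ENXIO;	/* old behaviour: always fail */
		}
		return 0;	/* fall through to the VRAM carveout */
	}

	int main(void)
	{
		printf("%d %d\n", gpu_init(false, false), gpu_init(false, true));
		return 0;
	}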
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6cf9975e951e..f09175698827 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+
+ if (adreno_is_a6xx(adreno_gpu)) {
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct io_pgtable_domain_attr pgtbl_cfg;
+ /*
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ }
}
mmu = msm_iommu_new(&pdev->dev, iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c3775f79525a..b3d9a333591b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -18,6 +18,7 @@
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
+{
+ return ((gpu->revn < 700 && gpu->revn > 599));
+}
+
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 6e971d552911..3bc7ed21de28 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+ if (state == ST_CONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 97dca3e378b7..d1780bcac8cc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
- if (rc || !is_link_rate_valid(bw_code) ||
+ if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("read dpcd failed %d\n", rc);
- return rc;
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+ dp_panel->link_info.num_lanes);
+ return -EINVAL;
}
if (dp_panel->dfp_present) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 535a0263ceeb..108c405e03dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
dma_set_max_seg_size(dev, UINT_MAX);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9a7c49bc394f..9d10739c4eb2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_gem_is_locked(obj));
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -988,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (msm_obj->pages)
kvfree(msm_obj->pages);
+ put_iova_vmas(obj);
+
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
*/
@@ -997,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
} else {
msm_gem_vunmap(obj);
put_pages(obj);
+ put_iova_vmas(obj);
msm_gem_unlock(obj);
}
- put_iova_vmas(obj);
-
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -1115,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
struct msm_gem_vma *vma;
struct page **pages;
+ drm_gem_private_object_init(dev, obj, size);
+
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
@@ -1126,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
- drm_gem_private_object_init(dev, obj, size);
-
+ msm_gem_lock(obj);
pages = get_pages(obj);
+ msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 6fdddb266fb1..4488e1c061b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -37,6 +37,7 @@ nouveau-y += dispnv50/wimmc37b.o
nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
+nouveau-y += dispnv50/wndwc67e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index 27ea3f34706d..abefc2343443 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 121c24a18f11..31d8b2e4791d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 33fff388dd83..c6367035970e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -222,7 +222,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
- const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
@@ -271,7 +271,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- if (!syncbuf)
+ if (syncbuf < 0)
return 0;
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 92bddc083617..38dec11e7dda 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -95,7 +95,7 @@ struct nv50_outp_atom {
int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
- u64 syncbuf, struct nv50_dmac *dmac);
+ s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
/*
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index a1ac153d5e98..566fbddfc8d7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 685b70871324..b390029c69ec 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
- &oclass, 0, &args, sizeof(args), 0,
+ &oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0356474ad6f6..ce451242f79e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -784,6 +784,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 3278e2880034..f4e0c5080034 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -129,6 +129,14 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_ilut_clr(struct nv50_wndw *);
+int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_csc_clr(struct nv50_wndw *);
+
+int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 429be0bb0222..abdd3bb658b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -80,7 +80,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -98,7 +98,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -111,7 +111,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -124,7 +124,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -179,7 +179,7 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
if (size = size ? size : 1024, size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
new file mode 100644
index 000000000000..7a370fa1df20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE,
+ NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+ NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+ NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+ NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwc67e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc67e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
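
Aside: relative to the c57e window, only image_set differs here; the LUT/CSC entry points were made non-static in wndwc57e.c above precisely so this function table can reuse them. Note that SET_PLANAR_STORAGE takes the byte pitch scaled down by 64 (the ">> 6"), i.e. the field is in 64-byte units:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pitch_bytes = 3840 * 4;	/* e.g. an XRGB8888 scanout */

		/* mirrors asyw->image.pitch[0] >> 6 in wndwc67e_image_set() */
		printf("SET_PLANAR_STORAGE pitch field = %u\n", pitch_bytes >> 6);
		return 0;
	}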
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index cd9a2e687bb6..57d4f457a7d4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -33,6 +33,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
#define NV_DEVICE_INFO_V0_TURING 0x0c
+#define NV_DEVICE_INFO_V0_AMPERE 0x0d
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 2c79beb41126..ba2c28ea43d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -88,6 +88,7 @@
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
+#define GA102_DISP /* cl5070.h */ 0x0000c670
#define GV100_DISP_CAPS 0x0000c373
@@ -103,6 +104,7 @@
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* cl507a.h */ 0x0000c57a
+#define GA102_DISP_CURSOR /* cl507a.h */ 0x0000c67a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -112,6 +114,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
+#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c67b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@@ -135,6 +138,7 @@
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
#define TU102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
+#define GA102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c67d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -145,6 +149,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
+#define GA102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c67e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 5c007ce62fc3..c920939a1467 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -120,6 +120,7 @@ struct nvkm_device {
GP100 = 0x130,
GV100 = 0x140,
TU100 = 0x160,
+ GA100 = 0x170,
} card_type;
u32 chipset;
u8 chiprev;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 5a96c942d912..0f6fa6631a19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -37,4 +37,5 @@ int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 1a39e52e09e3..50cc7c05eac4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -32,4 +32,5 @@ int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 34b56b10218a..2ecd52aec1d1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -86,6 +86,8 @@ int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index eaacf8d80527..cdcce5ece6ff 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -37,4 +37,5 @@ int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 81b977319640..640f649ce497 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -92,6 +92,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
static inline int
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 6641fe4c252c..e45ca4583967 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -32,4 +32,5 @@ int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index c7a94c94dbf3..72f35a2babcb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -256,6 +256,7 @@ nouveau_backlight_init(struct drm_connector *connector)
case NV_DEVICE_INFO_V0_PASCAL:
case NV_DEVICE_INFO_V0_VOLTA:
case NV_DEVICE_INFO_V0_TURING:
+ case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
ret = nv50_backlight_init(nv_encoder, &props, &ops);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 8d0d30e08f57..529cb60d5efb 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -35,6 +35,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GA102_DISP, -1 },
{ TU102_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
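Editor's note: the disps[] array above is ordered newest-first, because nvif_mclass() (as used here) selects the first entry in the list that the underlying object actually supports; putting GA102_DISP at the head lets Ampere boards pick it up while older boards fall through to their own classes. A loose userspace model of that first-match selection, where hw_supports() is a stand-in predicate and not a nouveau API:

#include <stdio.h>
#include <string.h>

static int
hw_supports(const char *oclass)
{
	/* pretend a Turing board: supports everything except the Ampere class */
	return strcmp(oclass, "GA102_DISP") != 0;
}

int main(void)
{
	static const char *disps[] = {
		"GA102_DISP", "TU102_DISP", "GV100_DISP", NULL
	};

	for (int i = 0; disps[i]; i++) {
		if (hw_supports(disps[i])) {
			printf("selected %s\n", disps[i]); /* TU102_DISP */
			break;
		}
	}
	return 0;
}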
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7851bec5f0e5..cdcc851e06f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1815,7 +1815,7 @@ nvf0_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1853,7 +1853,7 @@ nvf1_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1891,7 +1891,7 @@ nv106_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1929,7 +1929,7 @@ nv108_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1967,7 +1967,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2003,7 +2003,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2652,6 +2652,61 @@ nv168_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv170_chipset = {
+ .name = "GA100",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga100_fb_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+};
+
+static const struct nvkm_device_chip
+nv172_chipset = {
+ .name = "GA102",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga102_fb_new,
+ .gpio = ga102_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .disp = ga102_disp_new,
+ .dma = gv100_dma_new,
+};
+
+static const struct nvkm_device_chip
+nv174_chipset = {
+ .name = "GA104",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga102_fb_new,
+ .gpio = ga102_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .disp = ga102_disp_new,
+ .dma = gv100_dma_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -3063,6 +3118,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
+ case 0x170: device->card_type = GA100; break;
default:
break;
}
@@ -3160,10 +3216,23 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
default:
- nvdev_error(device, "unknown chipset (%08x)\n", boot0);
- ret = -ENODEV;
- goto done;
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
+
+ if (!device->chip) {
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+ ret = -ENODEV;
+ goto done;
+ }
+ break;
}
nvdev_info(device, "NVIDIA %s (%08x)\n",
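Editor's note: the chipset switch above now falls through to a second, gated lookup. Chipsets that resolve in the first switch probe as before; chipsets still considered unsupported (only GA100/0x170 here) attach only when the NvEnableUnsupportedChipsets config option is set, and anything else still fails with -ENODEV. A minimal standalone model of the two-stage lookup:

#include <stdbool.h>
#include <stdio.h>

struct chip { const char *name; };

static const struct chip nv172 = { "GA102" };
static const struct chip nv170 = { "GA100" };

static const struct chip *
lookup(unsigned chipset, bool enable_unsupported)
{
	const struct chip *chip = NULL;

	switch (chipset) {
	case 0x172: chip = &nv172; break;
	default:
		if (enable_unsupported) {
			switch (chipset) {
			case 0x170: chip = &nv170; break;
			default: break;
			}
		}
		break;
	}
	return chip; /* NULL maps to -ENODEV in the driver */
}

int main(void)
{
	printf("0x170 gated : %s\n", lookup(0x170, false) ? "found" : "ENODEV");
	printf("0x170 opt-in: %s\n", lookup(0x170, true)->name);
	return 0;
}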
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 03c6d9aef075..147894798786 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -176,6 +176,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
+ case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
default:
args->v0.family = 0;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index cf075311cdd2..b03f043efe26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -17,6 +17,7 @@ nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
+nvkm-y += nvkm/engine/disp/ga102.o
nvkm-y += nvkm/engine/disp/vga.o
nvkm-y += nvkm/engine/disp/head.o
@@ -42,6 +43,7 @@ nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/sorgp100.o
nvkm-y += nvkm/engine/disp/sorgv100.o
nvkm-y += nvkm/engine/disp/sortu102.o
+nvkm-y += nvkm/engine/disp/sorga102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/dp.o
@@ -75,6 +77,7 @@ nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/rootga102.o
nvkm-y += nvkm/engine/disp/capsgv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 3800aeb507d0..55fbfe28c6dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -33,6 +33,12 @@
#include <nvif/event.h>
+/* IED scripts are no longer used by UEFI/RM from Ampere onwards, but they have
+ * been updated for the x86 option ROM. However, the relevant VBIOS table
+ * versions weren't modified, so we're unable to detect this in a nice way.
+ */
+#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
+
struct lt_state {
struct nvkm_dp *dp;
u8 stat[6];
@@ -238,6 +244,19 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
+ if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
+ while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
+ lnkcmp += 3;
+ lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
+
+ nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
+ init.outp = &dp->outp.info;
+ init.or = ior->id;
+ init.link = ior->asy.link;
+ );
+ }
+
/* Set desired link configuration on the source. */
if ((lnkcmp = lt.dp->info.lnkcmp)) {
if (dp->version < 0x30) {
@@ -316,12 +335,14 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
);
}
- /* Execute BeforeLinkTraining script from DP Info table. */
- nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
- init.outp = &dp->outp.info;
- init.or = dp->outp.ior->id;
- init.link = dp->outp.ior->asy.link;
- );
+ if (!AMPERE_IED_HACK(dp->outp.disp)) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
+ nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
+ init.outp = &dp->outp.info;
+ init.or = dp->outp.ior->id;
+ init.link = dp->outp.ior->asy.link;
+ );
+ }
}
static const struct dp_rates {
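Editor's note: the lnkcmp walk in the hunk above treats the table as 3-byte records, a link-bandwidth key followed by a 16-bit script pointer, and (assuming the records are sorted by descending bandwidth, which the loop condition implies) selects the first record whose key does not exceed the rate being trained. A small sketch over a flat byte array; the table contents here are illustrative, not taken from VBIOS documentation:

#include <stdint.h>
#include <stdio.h>

static uint8_t rd08(const uint8_t *bios, unsigned p) { return bios[p]; }

static uint16_t
rd16(const uint8_t *bios, unsigned p)
{
	return bios[p] | (uint16_t)bios[p + 1] << 8; /* little-endian */
}

int main(void)
{
	static const uint8_t bios[] = {
		0x14, 0x22, 0x22,	/* 5.4Gbps/lane -> script 0x2222 */
		0x06, 0x11, 0x11,	/* 1.62Gbps/lane -> script 0x1111 */
	};
	unsigned lnkcmp = 0;
	uint8_t bw = 0x0a;	/* training 2.7Gbps/lane */

	while (bw < rd08(bios, lnkcmp))
		lnkcmp += 3;
	lnkcmp = rd16(bios, lnkcmp + 1);
	printf("script pointer %04x\n", lnkcmp); /* 0x1111 */
	return 0;
}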
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
new file mode 100644
index 000000000000..aa2e5645fe36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+ga102_disp = {
+ .init = tu102_disp_init,
+ .fini = gv100_disp_fini,
+ .intr = gv100_disp_intr,
+ .uevent = &gv100_disp_chan_uevent,
+ .super = gv100_disp_super,
+ .root = &ga102_disp_root_oclass,
+ .wndw = { .cnt = gv100_disp_wndw_cnt },
+ .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+ .sor = { .cnt = gv100_sor_cnt, .new = ga102_sor_new },
+ .ramht_size = 0x2000,
+};
+
+int
+ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return nv50_disp_new_(&ga102_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 09f3038eff26..9f0bb7c6b010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -150,6 +150,8 @@ void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+void tu102_sor_dp_vcpi(struct nvkm_ior *, int, u8, u8, u16, u16);
+
void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -207,4 +209,6 @@ int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
int tu102_sor_new(struct nvkm_disp *, int);
+
+int ga102_sor_new(struct nvkm_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index a677161c7f3a..db31b37752a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -86,6 +86,8 @@ void gv100_disp_intr(struct nv50_disp *);
void gv100_disp_super(struct work_struct *);
int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+int tu102_disp_init(struct nv50_disp *);
+
void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
new file mode 100644
index 000000000000..9af07c3cf9fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+ga102_disp_root = {
+ .user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
+ {{0,0,GA102_DISP_CURSOR }, gv100_disp_curs_new },
+ {{0,0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+ {{0,0,GA102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
+ {{0,0,GA102_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
+ {}
+ },
+};
+
+static int
+ga102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&ga102_disp_root, disp, oclass, data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+ga102_disp_root_oclass = {
+ .base.oclass = GA102_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = ga102_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 7070f5408d92..27bb170d0293 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -41,4 +41,5 @@ extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
+extern const struct nvkm_disp_oclass ga102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
new file mode 100644
index 000000000000..033827de9116
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static int
+ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(sor);
+ const u32 loff = nv50_sor_link(sor);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+
+ switch (sor->dp.bw) {
+ case 0x06: clksor |= 0x00000000; break;
+ case 0x0a: clksor |= 0x00040000; break;
+ case 0x14: clksor |= 0x00080000; break;
+ case 0x1e: clksor |= 0x000c0000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+ if (sor->dp.mst)
+ dpctrl |= 0x40000000;
+ if (sor->dp.ef)
+ dpctrl |= 0x00004000;
+
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+ /*XXX*/
+ nvkm_msec(device, 40, NVKM_DELAY);
+ nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+ nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+ return 0;
+}
+
+static void
+ga102_sor_clock(struct nvkm_ior *sor)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ u32 div2 = 0;
+ if (sor->asy.proto == TMDS) {
+ if (sor->tmds.high_speed)
+ div2 = 1;
+ }
+ nvkm_wr32(device, 0x00ec08 + (sor->id * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x00ec04 + (sor->id * 0x10), div2);
+}
+
+static const struct nvkm_ior_func
+ga102_sor_hda = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+ga102_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+};
+
+int
+ga102_sor_new(struct nvkm_disp *disp, int id)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda = nvkm_rd32(device, 0x08a15c);
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&ga102_sor_hda, disp, SOR, id);
+ return nvkm_ior_new_(&ga102_sor, disp, SOR, id);
+}
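Editor's note: the sor->dp.bw codes switched on in ga102_sor_dp_links() above are standard DPCD LINK_BW_SET values in units of 270Mbps per lane (0x06 = 1.62, 0x0a = 2.7, 0x14 = 5.4, 0x1e = 8.1 Gbps). A sketch of the code-to-clksor mapping; the 0x612300 field values come from the patch itself, not from documentation:

#include <stdio.h>

static int
bw_to_clksor(unsigned bw, unsigned *clksor)
{
	switch (bw) {
	case 0x06: *clksor = 0x00000000; break; /* RBR  */
	case 0x0a: *clksor = 0x00040000; break; /* HBR  */
	case 0x14: *clksor = 0x00080000; break; /* HBR2 */
	case 0x1e: *clksor = 0x000c0000; break; /* HBR3 */
	default: return -1;
	}
	return 0;
}

int main(void)
{
	unsigned clksor;

	if (!bw_to_clksor(0x1e, &clksor))
		printf("bw 0x1e -> clksor %08x (%.2f Gbps/lane)\n",
		       clksor, 0x1e * 0.27);
	return 0;
}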
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index 59865a934c4b..0cf9e8752d25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 883ae4151ff8..4c85d1d4fbd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -28,7 +28,7 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-static int
+int
tu102_disp_init(struct nv50_disp *disp)
{
struct nvkm_device *device = disp->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 7deb81b6dbac..4b571cc6bc70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
- if (!shadow_fetch(bios, mthd, image.size)) {
+ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
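Editor's note: the one-line shadow.c fix above matters because shadow_fetch()'s size argument is an upper bound measured from the start of the ROM, while images live at an offset; requesting only image.size truncates any image whose base is non-zero. The arithmetic, sketched:

#include <stdbool.h>
#include <stdio.h>

struct image { unsigned base, size; };

static bool
covers(unsigned shadowed, const struct image *img)
{
	return shadowed >= img->base + img->size;
}

int main(void)
{
	struct image img = { .base = 0x10000, .size = 0x8000 };

	printf("fetch size only : %d\n", covers(img.size, &img));            /* 0 */
	printf("fetch base+size : %d\n", covers(img.base + img.size, &img)); /* 1 */
	return 0;
}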
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index 3634cd0630b8..023ddc7c5399 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -64,6 +64,9 @@ pramin_init(struct nvkm_bios *bios, const char *name)
return NULL;
/* we can't get the bios image pointer without PDISP */
+ if (device->card_type >= GA100)
+ addr = device->chipset == 0x170; /*XXX: find the fuse reg for this */
+ else
if (device->card_type >= GM100)
addr = nvkm_rd32(device, 0x021c04);
else
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index b3429371ed82..d1abb64841da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -15,3 +15,4 @@ nvkm-y += nvkm/subdev/devinit/gm107.o
nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
nvkm-y += nvkm/subdev/devinit/tu102.o
+nvkm-y += nvkm/subdev/devinit/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
new file mode 100644
index 000000000000..636a92128f6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvbios_pll info;
+ int head = type - PLL_VPLL0;
+ int N, fN, M, P;
+ int ret;
+
+ ret = nvbios_pll_parse(device->bios, type, &info);
+ if (ret)
+ return ret;
+
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+ if (ret < 0)
+ return ret;
+
+ switch (info.type) {
+ case PLL_VPLL0:
+ case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
+ nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02080004);
+ nvkm_wr32(device, 0x00ef18 + (head * 0x40), (N << 16) | fN);
+ nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) | M);
+ nvkm_wr32(device, 0x00e9c0 + (head * 0x04), 0x00000001);
+ break;
+ default:
+ nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_devinit_func
+ga100_devinit = {
+ .init = nv50_devinit_init,
+ .post = tu102_devinit_post,
+ .pll_set = ga100_devinit_pll_set,
+};
+
+int
+ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&ga100_devinit, device, index, pinit);
+}
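Editor's note: ga100_devinit_pll_set() above indexes the VPLL per head (head = type - PLL_VPLL0, registers at a 0x40 stride) and packs the coefficients gt215_pll_calc() produced as (N << 16) | fN and (P << 16) | M. A sketch of just the packing, with made-up coefficient values; the field layout is inferred from the writes, not from documentation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int N = 0x68, fN = 0x1234, M = 2, P = 4, head = 1;
	uint32_t coef = (uint32_t)N << 16 | (uint16_t)fN;
	uint32_t div = (uint32_t)P << 16 | (uint32_t)M;

	printf("wr32(%06x, %08x)\n", 0x00ef18 + head * 0x40, coef);
	printf("wr32(%06x, %08x)\n", 0x00ef04 + head * 0x40, div);
	return 0;
}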
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index 94723352137a..05961e624264 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -19,4 +19,5 @@ void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
int index, struct nvkm_devinit *);
int nv04_devinit_post(struct nvkm_devinit *, bool);
+int tu102_devinit_post(struct nvkm_devinit *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 397670e72fff..9a469bf482f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,7 +65,7 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
-static int
+int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 43a42159a3d0..5d0bab8ecb43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,8 @@ nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/ga100.o
+nvkm-y += nvkm/subdev/fb/ga102.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -52,6 +54,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/ramgm200.o
nvkm-y += nvkm/subdev/fb/ramgp100.o
+nvkm-y += nvkm/subdev/fb/ramga102.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
new file mode 100644
index 000000000000..bf82686851cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga100_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = gp100_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
new file mode 100644
index 000000000000..bcecf84a6e67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga102_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = ga102_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga102_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 10ff5d053f7e..feda86a5fba8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -22,7 +22,7 @@
#include "gf100.h"
#include "ram.h"
-static int
+int
gv100_fb_init_page(struct nvkm_fb *fb)
{
return (fb->page == 16) ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 5be9c563350d..66932ac10d15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -82,4 +82,6 @@ int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
struct nvkm_fb **);
bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
+
+int gv100_fb_init_page(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index d723a9b4e3c4..ea7d66f3dd82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -70,4 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
new file mode 100644
index 000000000000..298c136cefe0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static const struct nvkm_ram_func
+ga102_ram = {
+};
+
+int
+ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+ u32 size = nvkm_rd32(device, 0x1183a4);
+
+ return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
+}
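Editor's note: the cast in ga102_ram_new() above is load-bearing. The 0x1183a4 value is treated as a MiB count (an assumption this sketch inherits), and shifting a u32 left by 20 would wrap for boards with 4GiB or more. A standalone demonstration with a 24GiB GA102's worth of memory:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mib = 24576;			/* 24GiB board */
	uint32_t wrong = mib << 20;		/* 0x600000000 wraps to 0 */
	uint64_t right = (uint64_t)mib << 20;

	printf("u32 shift: %" PRIu32 "\n", wrong);	/* 0 */
	printf("u64 shift: %" PRIu64 "\n", right);	/* 25769803776 */
	return 0;
}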
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index b2ad5922a1c2..efbbaa080de5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/gpio/nv50.o
nvkm-y += nvkm/subdev/gpio/g94.o
nvkm-y += nvkm/subdev/gpio/gf119.o
nvkm-y += nvkm/subdev/gpio/gk104.o
+nvkm-y += nvkm/subdev/gpio/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
new file mode 100644
index 000000000000..62c791baf400
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ u8 ver, len;
+ u16 entry;
+ int ent = -1;
+
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+ u32 data = nvbios_rd32(bios, entry);
+ u8 line = (data & 0x0000003f);
+ u8 defs = !!(data & 0x00000080);
+ u8 func = (data & 0x0000ff00) >> 8;
+ u8 unk0 = (data & 0x00ff0000) >> 16;
+ u8 unk1 = (data & 0x1f000000) >> 24;
+
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
+ continue;
+
+ nvkm_gpio_set(gpio, 0, func, line, defs);
+
+ nvkm_mask(device, 0x021200 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+ }
+}
+
+static int
+ga102_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nvkm_mask(device, 0x021200 + (line * 4), 0x00003000, data);
+ nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+ return 0;
+}
+
+static int
+ga102_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ return !!(nvkm_rd32(device, 0x021200 + (line * 4)) & 0x00004000);
+}
+
+static void
+ga102_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x021640);
+ u32 intr1 = nvkm_rd32(device, 0x02164c);
+ u32 stat0 = nvkm_rd32(device, 0x021648) & intr0;
+ u32 stat1 = nvkm_rd32(device, 0x021654) & intr1;
+ *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+ *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+ nvkm_wr32(device, 0x021640, intr0);
+ nvkm_wr32(device, 0x02164c, intr1);
+}
+
+static void
+ga102_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte0 = nvkm_rd32(device, 0x021648);
+ u32 inte1 = nvkm_rd32(device, 0x021654);
+ if (type & NVKM_GPIO_LO)
+ inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+ mask >>= 16;
+ data >>= 16;
+ if (type & NVKM_GPIO_LO)
+ inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte1 = (inte1 & ~mask) | data;
+ nvkm_wr32(device, 0x021648, inte0);
+ nvkm_wr32(device, 0x021654, inte1);
+}
+
+static const struct nvkm_gpio_func
+ga102_gpio = {
+ .lines = 32,
+ .intr_stat = ga102_gpio_intr_stat,
+ .intr_mask = ga102_gpio_intr_mask,
+ .drive = ga102_gpio_drive,
+ .sense = ga102_gpio_sense,
+ .reset = ga102_gpio_reset,
+};
+
+int
+ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio);
+}
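Editor's note: ga102_gpio_intr_stat() above reassembles 32 lines' worth of events from two registers, each of which (judging by the shifts; the register semantics aren't documented here) carries 16 lines' hi events in its low half and the same lines' lo events in its high half. The bit shuffle is verifiable standalone:

#include <stdint.h>
#include <stdio.h>

static void
recombine(uint32_t stat0, uint32_t stat1, uint32_t *hi, uint32_t *lo)
{
	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
}

int main(void)
{
	uint32_t hi, lo;

	/* line 0 hi event (stat0 bit 0), line 16 lo event (stat1 bit 16) */
	recombine(0x00000001, 0x00010000, &hi, &lo);
	printf("hi %08x lo %08x\n", hi, lo); /* hi 00000001 lo 00010000 */
	return 0;
}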
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 723d0284caef..819703913a00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
nvkm-y += nvkm/subdev/i2c/gf117.o
nvkm-y += nvkm/subdev/i2c/gf119.o
nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gk110.o
nvkm-y += nvkm/subdev/i2c/gm200.o
nvkm-y += nvkm/subdev/i2c/pad.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 30b48896965e..f920eabf8628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,13 @@
#define __NVKM_I2C_AUX_H__
#include "pad.h"
+static inline void
+nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ if (i2c->func->aux_autodpcd)
+ i2c->func->aux_autodpcd(i2c, aux, enable);
+}
+
struct nvkm_i2c_aux_func {
bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index db7769cb33eb..47068f6f9c55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -77,7 +77,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -154,7 +157,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
memcpy(data, xbuf, *size);
*size = stat & 0x0000001f;
}
-
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
g94_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index edb6148cbca0..8bd1d442e465 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -33,7 +33,7 @@ static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
- } while (ctrl & 0x03010000);
+ } while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
- } while ((ctrl & 0x03000000) != urep);
+ } while ((ctrl & 0x07000000) != urep);
return 0;
}
@@ -77,7 +77,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -155,6 +158,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
*size = stat & 0x0000001f;
}
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
gm200_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
new file mode 100644
index 000000000000..8e3bfa1af52a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "pad.h"
+
+static void
+gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
+static const struct nvkm_i2c_func
+gk110_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+ .pad_s_new = gf119_i2c_pad_s_new,
+ .aux = 4,
+ .aux_stat = gk104_aux_stat,
+ .aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gk110_aux_autodpcd,
+};
+
+int
+gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+ return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index a23c5f315221..7b2375bff8a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -24,6 +24,12 @@
#include "priv.h"
#include "pad.h"
+static void
+gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
static const struct nvkm_i2c_func
gm200_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
@@ -31,6 +37,7 @@ gm200_i2c = {
.aux = 8,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gm200_aux_autodpcd,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 461016814f4f..44b7bb7d4777 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
-#include <subdev/i2c.h>
+#include "priv.h"
struct nvkm_i2c_pad {
const struct nvkm_i2c_pad_func *func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index bd86bc298ebe..e35f6036fcfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -23,6 +23,10 @@ struct nvkm_i2c_func {
/* mask on/off interrupt types for a given set of auxch
*/
void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+
+ /* enable/disable HW-initiated DPCD reads
+ */
+ void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable);
};
void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 2340040942c9..1115376bc85f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index f3915f85838e..22e487b493ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+ break;
+ );
}
static int
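Editor's note: both ibus intr hunks above replace the per-unit ack writes with a single flush handshake at the end of the handler: write 0x02 into the low six bits of the control register, then poll until the hardware clears them, giving up after 2ms. nvkm_msec() supplies the timed poll loop in the driver; a userspace stand-in for the same pattern:

#include <stdint.h>
#include <stdio.h>

static int reads;

static uint32_t
rd32(void)
{
	/* pretend the hardware completes the flush on the third read */
	return ++reads >= 3 ? 0x00 : 0x02;
}

int main(void)
{
	int budget = 2000;	/* stand-in for nvkm_msec()'s 2ms budget */

	while (budget--) {
		if (!(rd32() & 0x3f)) {
			puts("flush complete");
			return 0;
		}
	}
	puts("timed out");
	return 1;
}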
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 2585ef07532a..ac2b34e9ac6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -14,3 +14,4 @@ nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
nvkm-y += nvkm/subdev/mc/tu102.o
+nvkm-y += nvkm/subdev/mc/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
new file mode 100644
index 000000000000..967eb3af11eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81610, 0x00000004);
+}
+
+static void
+ga100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81608, 0x00000004);
+}
+
+static void
+ga100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 intr)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81210, mask & intr );
+ nvkm_wr32(mc->subdev.device, 0xb81410, mask & ~(mask & intr));
+}
+
+static u32
+ga100_mc_intr_stat(struct nvkm_mc *mc)
+{
+ u32 intr_top = nvkm_rd32(mc->subdev.device, 0xb81600), intr = 0x00000000;
+ if (intr_top & 0x00000004)
+ intr = nvkm_mask(mc->subdev.device, 0xb81010, 0x00000000, 0x00000000);
+ return intr;
+}
+
+static void
+ga100_mc_init(struct nvkm_mc *mc)
+{
+ nv50_mc_init(mc);
+ nvkm_wr32(mc->subdev.device, 0xb81210, 0xffffffff);
+}
+
+static const struct nvkm_mc_func
+ga100_mc = {
+ .init = ga100_mc_init,
+ .intr = gp100_mc_intr,
+ .intr_unarm = ga100_mc_intr_unarm,
+ .intr_rearm = ga100_mc_intr_rearm,
+ .intr_mask = ga100_mc_intr_mask,
+ .intr_stat = ga100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&ga100_mc, device, index, pmc);
+}
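Editor's note: ga100_mc_intr_mask() above treats 0xb81210/0xb81410 as a set/clear register pair, with the enabled bits of the mask going to one register and the remainder to the other. Note that mask & ~(mask & intr) reduces to mask & ~intr. A quick check of the identity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x0000000f, intr = 0x00000005;
	uint32_t set = mask & intr;
	uint32_t clear = mask & ~(mask & intr);

	printf("set %08x clear %08x\n", set, clear);		/* 00000005 0000000a */
	printf("identity holds: %d\n", clear == (mask & ~intr)); /* 1 */
	return 0;
}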
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index de91e9a26172..6d5212ae2fd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_mm *mm = &device->fb->ram->vram;
- const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
- const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
- const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
u8 heap = NVKM_MEM_VRAM;
int heapM, heapN, heapU;
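Editor's note: the u32 to u64 change above fixes a silent narrowing. nvkm_mm_heap_size() produces a 64-bit total (which is why the fix widens the locals), and assigning that to a u32 truncates quietly once a heap is large enough; C permits the conversion without a diagnostic by default. Sketched:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = (uint64_t)6 << 32;	/* hypothetical large heap */
	uint32_t narrowed = size;		/* compiles without complaint */

	printf("%" PRIu64 " -> %" PRIu32 "\n", size, narrowed); /* ... -> 0 */
	return 0;
}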
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d59ef6e92a40..23195d5d4e91 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -730,9 +730,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
- ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
- dma_addressing_limited(&rdev->pdev->dev));
-
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 7b2f60616750..11e0313db0ea 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -66,7 +66,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
-static spinlock_t shrinker_lock;
+static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -79,12 +79,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
struct page *p;
void *vaddr;
- if (order) {
- gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ /* Don't set the __GFP_COMP flag for higher order allocations.
+ * Mapping pages directly into a userspace process and calling
+ * put_page() on a TTM allocated page is illegal.
+ */
+ if (order)
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_KSWAPD_RECLAIM;
- gfp_flags &= ~__GFP_MOVABLE;
- gfp_flags &= ~__GFP_COMP;
- }
if (!pool->use_dma_alloc) {
p = alloc_pages(gfp_flags, order);
@@ -190,7 +191,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
size_t size = (1ULL << order) * PAGE_SIZE;
addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(pool->dev, **dma_addr))
+ if (dma_mapping_error(pool->dev, addr))
return -EFAULT;
}
@@ -249,9 +250,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -259,9 +260,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p, *tmp;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
list_for_each_entry_safe(p, tmp, &pt->pages, lru)
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -302,7 +303,7 @@ static unsigned int ttm_pool_shrink(void)
unsigned int num_freed;
struct page *p;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
p = ttm_pool_type_take(pt);
@@ -314,7 +315,7 @@ static unsigned int ttm_pool_shrink(void)
}
list_move_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return num_freed;
}
@@ -507,7 +508,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
-EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -525,7 +525,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
-EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
@@ -566,7 +565,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
seq_puts(m, "\t ");
for (i = 0; i < MAX_ORDER; ++i)
@@ -602,7 +601,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
atomic_long_read(&allocated_pages), page_pool_size);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return 0;
}
@@ -646,7 +645,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- spin_lock_init(&shrinker_lock);
+ mutex_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
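
The spinlock-to-mutex conversion that runs through this file is about sleeping under the lock: the shrinker walks shrinker_list and frees pages while holding it, and freeing a pooled page can sleep (for example when caching attributes are reverted), which is illegal under a spinlock. A userspace analogue of the now-legal pattern, with nanosleep() standing in for the sleeping free path; this is a sketch of the locking rule, not kernel code:

```c
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t shrinker_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for ttm_pool_free_page(): may block, which a spinlock-style
 * critical section must never do but a mutex tolerates. */
static void free_page_may_sleep(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

	nanosleep(&ts, NULL);
}

int main(void)
{
	pthread_mutex_lock(&shrinker_lock);
	free_page_may_sleep();	/* sleeping while holding a mutex is fine */
	pthread_mutex_unlock(&shrinker_lock);
	printf("freed one page under the mutex\n");
	return 0;
}
```
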
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 555106220578..98cab0bbe92d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1267,6 +1267,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
card->dai_link = dai_link;
card->num_links = 1;
card->name = vc4_hdmi->variant->card_name;
+ card->driver_name = "vc4-hdmi";
card->dev = dev;
card->owner = THIS_MODULE;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 7bdda1b5b221..09fa75a2b289 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -899,6 +899,7 @@ config HID_SONY
depends on NEW_LEDS
depends on LEDS_CLASS
select POWER_SUPPLY
+ select CRC32
help
Support for
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 3d1ccac5d99a..2ab38b715347 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -154,7 +154,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
- &cl_data->sensor_phys_addr[i],
+ &cl_data->sensor_dma_addr[i],
GFP_KERNEL);
cl_data->sensor_sts[i] = 0;
cl_data->sensor_requested_cnt[i] = 0;
@@ -187,7 +187,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
}
info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
info.sensor_idx = cl_idx;
- info.phys_address = cl_data->sensor_phys_addr[i];
+ info.dma_address = cl_data->sensor_dma_addr[i];
cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
if (!cl_data->report_descr[i]) {
@@ -212,7 +212,7 @@ cleanup:
if (cl_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
cl_data->sensor_virt_addr[i],
- cl_data->sensor_phys_addr[i]);
+ cl_data->sensor_dma_addr[i]);
}
kfree(cl_data->feature_report[i]);
kfree(cl_data->input_report[i]);
@@ -238,7 +238,7 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
if (cl_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
cl_data->sensor_virt_addr[i],
- cl_data->sensor_phys_addr[i]);
+ cl_data->sensor_dma_addr[i]);
}
}
kfree(cl_data);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 6be0783d885c..d7eac1728e31 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -27,7 +27,7 @@ struct amdtp_cl_data {
int hid_descr_size[MAX_HID_DEVICES];
phys_addr_t phys_addr_base;
u32 *sensor_virt_addr[MAX_HID_DEVICES];
- phys_addr_t sensor_phys_addr[MAX_HID_DEVICES];
+ dma_addr_t sensor_dma_addr[MAX_HID_DEVICES];
u32 sensor_sts[MAX_HID_DEVICES];
u32 sensor_requested_cnt[MAX_HID_DEVICES];
u8 report_type[MAX_HID_DEVICES];
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index a51c7b76283b..dbac16641662 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -41,7 +41,7 @@ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info i
cmd_param.s.buf_layout = 1;
cmd_param.s.buf_length = 16;
- writeq(info.phys_address, privdata->mmio + AMD_C2P_MSG2);
+ writeq(info.dma_address, privdata->mmio + AMD_C2P_MSG2);
writel(cmd_param.ul, privdata->mmio + AMD_C2P_MSG1);
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index e8be94f935b7..8f8d19b2cfe5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -67,7 +67,7 @@ struct amd_mp2_dev {
struct amd_mp2_sensor_info {
u8 sensor_idx;
u32 period;
- phys_addr_t phys_address;
+ dma_addr_t dma_address;
};
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4c5f23640f9c..5ba0aa1d2335 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -389,6 +389,7 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index dc7f6b4a775c..f23027d2795b 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -322,6 +322,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 1ffcfc9a1e03..45e7e0bdd382 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1869,6 +1869,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc531),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech G602 receiver (0xc537) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc537),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech lightspeed receiver (0xc539) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index f85781464807..7eb9a6ddb46a 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4053,6 +4053,8 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX Master mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Ergo trackball over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* MX Master 3 mouse over Bluetooth */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index d670bcd57bde..8429ebe7097e 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if (cls->name == MT_CLS_WIN_8 &&
+ if ((cls->name == MT_CLS_WIN_8 ||
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -2054,6 +2055,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce09) },
+
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index d26d8cd98efc..56406cee401f 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -90,7 +90,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
goto cleanup;
} else if (rc < 0) {
hid_err(hdev,
- "failed retrieving string descriptor #%hhu: %d\n",
+ "failed retrieving string descriptor #%u: %d\n",
idx, rc);
goto cleanup;
}
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 41012681cafd..4399d6c6afef 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1482,7 +1482,7 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
wdata->state.cmd_err = err;
wiimote_cmd_complete(wdata);
} else if (err) {
- hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+ hid_warn(wdata->hdev, "Remote error %u on req %u\n", err,
cmd);
}
}
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 045c464228d9..aa9e48876ced 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
}
if (flush)
- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
else if (insert)
- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
raw_data, report_size);
return insert && !flush;
@@ -1270,6 +1270,38 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
group);
}
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+ struct kfifo_rec_ptr_2 *devres = res;
+
+ kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct kfifo_rec_ptr_2 *pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+ sizeof(struct kfifo_rec_ptr_2),
+ GFP_KERNEL);
+
+ if (!pen_fifo)
+ return -ENOMEM;
+
+ error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error) {
+ devres_free(pen_fifo);
+ return error;
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
+ wacom_wac->pen_fifo = pen_fifo;
+
+ return 0;
+}
+
enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
{
struct wacom *wacom = led->wacom;
@@ -2724,7 +2756,7 @@ static int wacom_probe(struct hid_device *hdev,
if (features->check_for_hid_type && features->hid_type != hdev->type)
return -ENODEV;
- error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ error = wacom_devm_kfifo_alloc(wacom);
if (error)
return error;
@@ -2786,8 +2818,6 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
-
- kfifo_free(&wacom_wac->pen_fifo);
}
#ifdef CONFIG_PM
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index da612b6e9c77..195910dd2154 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -342,7 +342,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- struct kfifo_rec_ptr_2 pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
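
The wacom change above is an instance of the generic devres pattern: devres_alloc() creates a zeroed wrapper tied to a release callback, the payload is initialised, and devres_add() hands ownership to the device so the release callback (and the memory) runs automatically on unbind; devres_free() covers the init-failure path. A minimal sketch of that shape, where my_buf and its init/fini helpers are hypothetical stand-ins and only the devres_*() calls are real kernel API:

```c
/* Sketch of the devres idiom used by wacom_devm_kfifo_alloc() above. */
static void my_buf_release(struct device *dev, void *res)
{
	my_buf_fini(res);			/* mirrors kfifo_free() */
}

static int my_buf_devm_alloc(struct device *dev, struct my_buf **out)
{
	struct my_buf *buf;
	int err;

	buf = devres_alloc(my_buf_release, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = my_buf_init(buf);			/* mirrors kfifo_alloc() */
	if (err) {
		devres_free(buf);		/* not yet owned by the device */
		return err;
	}

	devres_add(dev, buf);			/* released on driver unbind */
	*out = buf;
	return 0;
}
```
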
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 502f8cd95f6d..d491fdcee61f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2550,7 +2550,6 @@ static void hv_kexec_handler(void)
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
- hyperv_cleanup();
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -2566,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
- hyperv_cleanup();
};
static int hv_synic_suspend(void)
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 9b306448b7a0..822c2e74b98d 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
- s_config = devm_kcalloc(dev, cpus + sockets,
+ s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
+ s_config[i] = 0;
return 0;
}
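
The +1 in the devm_kcalloc() above exists because hwmon sensor config arrays are scanned up to a 0 terminator, so the terminator itself needs an allocated slot; the explicit s_config[i] = 0 then writes it. A userspace analogue of the walk-until-zero convention:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int nr_sensors = 4;
	/* +1 so the 0 terminator has a slot, as in the fix above. */
	unsigned int *cfg = calloc(nr_sensors + 1, sizeof(*cfg));
	int count = 0;

	if (!cfg)
		return 1;
	for (int i = 0; i < nr_sensors; i++)
		cfg[i] = 0x1;		/* some per-sensor attribute flag */
	cfg[nr_sensors] = 0;		/* explicit, like s_config[i] = 0 above */

	for (unsigned int *p = cfg; *p; p++)	/* walk until the 0 entry */
		count++;
	printf("%d sensors\n", count);
	free(cfg);
	return 0;
}
```
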
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 777439f48c14..111a91dc6b79 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -334,8 +334,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
- /* Set duty cycle to maximum allowed and enable PWM output */
pwm_init_state(ctx->pwm, &state);
+ /*
+ * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+ * long. Check this here to prevent the fan from running at too low a
+ * frequency.
+ */
+ if (state.period > ULONG_MAX / MAX_PWM + 1) {
+ dev_err(dev, "Configured period too big\n");
+ return -EINVAL;
+ }
+
+ /* Set duty cycle to maximum allowed and enable PWM output */
state.duty_cycle = ctx->pwm->args.period - 1;
state.enabled = true;
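
The guard added above is standard overflow pre-checking: MAX_PWM * (period - 1) overflows unsigned long exactly when period - 1 > ULONG_MAX / MAX_PWM, i.e. when period > ULONG_MAX / MAX_PWM + 1. A runnable check of the boundary (MAX_PWM is 255 in this driver):

```c
#include <limits.h>
#include <stdio.h>

#define MAX_PWM 255UL	/* value used by pwm-fan.c */

/* Overflow of MAX_PWM * (period - 1) in unsigned long happens exactly
 * when period - 1 > ULONG_MAX / MAX_PWM, i.e. the guard added above. */
static int period_overflows(unsigned long period)
{
	return period > ULONG_MAX / MAX_PWM + 1;
}

int main(void)
{
	unsigned long limit = ULONG_MAX / MAX_PWM + 1;

	printf("largest safe period: %lu\n", limit);
	printf("limit     -> %s\n", period_overflows(limit) ? "reject" : "ok");
	printf("limit + 1 -> %s\n", period_overflows(limit + 1) ? "reject" : "ok");
	return 0;
}
```
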
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 52acd77438ed..251e75c9ba9d 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -269,6 +269,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Alder Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 3e7df1c0477f..81d7b21d31ec 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
static int stm_heartbeat_init(void)
{
- int i, ret = -ENOMEM;
+ int i, ret;
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
- if (!stm_heartbeat[i].data.name)
+ if (!stm_heartbeat[i].data.name) {
+ ret = -ENOMEM;
goto fail_unregister;
+ }
stm_heartbeat[i].data.nr_chans = 1;
stm_heartbeat[i].data.link = stm_heartbeat_link;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d4d60ad0eda0..ab1f39ac39f4 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1013,6 +1013,7 @@ config I2C_SIRF
config I2C_SPRD
tristate "Spreadtrum I2C interface"
depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+ depends on COMMON_CLK
help
If you say yes to this option, support will be included for the
Spreadtrum I2C interface.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ae90713443fa..877fe3733a42 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
/* Register GPIO descriptor lookup table */
lookup = devm_kzalloc(dev,
- struct_size(lookup, table, mux_config->n_gpios),
+ struct_size(lookup, table, mux_config->n_gpios + 1),
GFP_KERNEL);
if (!lookup)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b444fbf1a262..a8e8af57e33f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -241,6 +241,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
};
+static const struct platform_device_id imx_i2c_devtype[] = {
+ {
+ .name = "imx1-i2c",
+ .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+ }, {
+ .name = "imx21-i2c",
+ .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
@@ -1330,7 +1343,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
match = device_get_match_data(&pdev->dev);
- i2c_imx->hwdata = match;
+ if (match)
+ i2c_imx->hwdata = match;
+ else
+ i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+ platform_get_device_id(pdev)->driver_data;
/* Setup i2c_imx driver structure */
strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -1498,6 +1515,7 @@ static struct platform_driver i2c_imx_driver = {
.of_match_table = i2c_imx_dt_ids,
.acpi_match_table = i2c_imx_acpi_ids,
},
+ .id_table = imx_i2c_devtype,
};
static int __init i2c_adap_imx_init(void)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 33de99b7bc20..0818d3e50734 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -38,6 +38,7 @@
#define I2C_IO_CONFIG_OPEN_DRAIN 0x0003
#define I2C_IO_CONFIG_PUSH_PULL 0x0000
#define I2C_SOFT_RST 0x0001
+#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
#define I2C_TIME_CLR_VALUE 0x0000
@@ -45,6 +46,7 @@
#define I2C_WRRD_TRANAC_VALUE 0x0002
#define I2C_RD_TRANAC_VALUE 0x0001
#define I2C_SCL_MIS_COMP_VALUE 0x0000
+#define I2C_CHN_CLR_FLAG 0x0000
#define I2C_DMA_CON_TX 0x0000
#define I2C_DMA_CON_RX 0x0001
@@ -54,7 +56,9 @@
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
+#define I2C_DMA_WARM_RST 0x0001
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_HANDSHAKE_RST 0x0004
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
- mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ if (i2c->dev_comp->dma_sync) {
+ writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+ i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+ OFFSET_SOFTRESET);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+ } else {
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ }
/* Set ioconfig */
if (i2c->use_push_pull)
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index d9607905dc2f..845eda70b8ca 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ if (data[i] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
length += data[i];
}
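
The off-by-one fixed above: I2C_SMBUS_BLOCK_MAX is 32, and the old bound (> I2C_SMBUS_BLOCK_MAX + 1) still accepted a device-reported length of 33, one byte more than an SMBus block buffer holds. A standalone illustration of the corrected bound:

```c
#include <stdio.h>
#include <string.h>

#define I2C_SMBUS_BLOCK_MAX 32	/* SMBus block limit, as in the uapi header */

int main(void)
{
	unsigned char block[I2C_SMBUS_BLOCK_MAX];
	unsigned int reported = 33;	/* length byte from a misbehaving device */

	/* The old bound (reported > I2C_SMBUS_BLOCK_MAX + 1) accepted 33,
	 * one byte more than the block buffer holds. */
	if (reported > I2C_SMBUS_BLOCK_MAX) {
		printf("reject length %u (max %d)\n", reported,
		       I2C_SMBUS_BLOCK_MAX);
		return 0;
	}
	memset(block, 0, reported);
	printf("accept length %u\n", reported);
	return 0;
}
```
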
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 19cda6742423..2917fecf6c80 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -72,6 +72,8 @@
/* timeout (ms) for pm runtime autosuspend */
#define SPRD_I2C_PM_TIMEOUT 1000
+/* timeout (ms) for transfer message */
+#define I2C_XFER_TIMEOUT 1000
/* SPRD i2c data structure */
struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, bool is_last_msg)
{
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ unsigned long time_left;
i2c_dev->msg = msg;
i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
sprd_i2c_opt_start(i2c_dev);
- wait_for_completion(&i2c_dev->complete);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(I2C_XFER_TIMEOUT));
+ if (!time_left)
+ return -ETIMEDOUT;
return i2c_dev->err;
}
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec7a7e917edd..c0c7d01473f2 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
flags &= ~I2C_M_RECV_LEN;
}
- return (flags != 0) ? -EINVAL : 0;
+ return 0;
}
/**
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 6f08c0c3238d..8b113ae32dc7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
/* read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ else if (i2c_dev->is_vi)
+ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
}
static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
}
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned int reg, unsigned int len)
+{
+ u32 *data32 = data;
+
+ /*
+ * The VI I2C controller has a known hardware bug where writes get
+ * stuck when multiple writes to the TX_FIFO register happen in quick
+ * succession. The recommended software workaround is to read an I2C
+ * register after each TX_FIFO write to flush out the data.
+ */
+ while (len--)
+ i2c_writel(i2c_dev, *data32++, reg);
+}
+
static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
unsigned int reg, unsigned int len)
{
@@ -533,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
u32 val;
- if (!i2c_dev->atomic_mode)
+ if (!i2c_dev->atomic_mode && !in_irq())
return readl_relaxed_poll_timeout(addr, val, !(val & mask),
delay_us, timeout_us);
@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ if (i2c_dev->is_vi)
+ i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ else
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
buf += words_to_transfer * BYTES_PER_FIFO_WORD;
}
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2162bc80f09e..013ad33fbbc8 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
- sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1a53c7a75224..4867b67b60d6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -515,15 +515,10 @@ repeat:
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
- (rq->rq_flags & RQF_PREEMPT) == 0) {
+ (rq->rq_flags & RQF_PM) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 192e6c65d34e..82ab308f1aaf 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d79335506ecd..28f93b9aa51b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -963,6 +963,39 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.enter = NULL }
};
+/*
+ * Note: depending on the HW and FW revision, the SnowRidge SoC may or may not
+ * support C6, and this is indicated in the CPUID mwait leaf.
+ */
+static struct cpuidle_state snr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 15,
+ .target_residency = 25,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 130,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1084,6 +1117,12 @@ static const struct idle_cpu idle_cpu_dnv __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_snr __initconst = {
+ .state_table = snr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1122,7 +1161,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
{}
};
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index b11c8c47ba2a..e946903b0993 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
flags, indio_dev->name, indio_dev);
if (ret)
- goto error_kfifo_free;
+ return ret;
indio_dev->setup_ops = setup_ops;
indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
-
-error_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
}
static const char * const chan_name_ain[] = {
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 0507283bd4c1..2dbd2646e44e 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -23,35 +23,31 @@
* @sdata: Sensor data.
*
* returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
*/
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
- struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
{
int ret, status;
/* How would I know if I can't check it? */
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
- return -EINVAL;
+ return true;
/* No scan mask, no interrupt */
if (!indio_dev->active_scan_mask)
- return 0;
+ return false;
ret = regmap_read(sdata->regmap,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev, "error checking samples available\n");
- return ret;
+ return false;
}
- if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
- return 1;
-
- return 0;
+ return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
}
/**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
/* Tell the interrupt handler that we're dealing with edges */
if (irq_trig == IRQF_TRIGGER_FALLING ||
- irq_trig == IRQF_TRIGGER_RISING)
+ irq_trig == IRQF_TRIGGER_RISING) {
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+ dev_err(&indio_dev->dev,
+ "edge IRQ not supported w/o stat register.\n");
+ err = -EOPNOTSUPP;
+ goto iio_trigger_free;
+ }
sdata->edge_irq = true;
- else
+ } else {
/*
* If we're not using edges (i.e. level interrupts) we
* just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* interrupt handler top half again and start over.
*/
irq_trig |= IRQF_ONESHOT;
+ }
/*
* If the interrupt pin is Open Drain, by definition this
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 28921b62e642..e9297c25d4ef 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
return ret;
if (pwr_down)
- st->pwr_down_mask |= (1 << chan->channel);
- else
st->pwr_down_mask &= ~(1 << chan->channel);
+ else
+ st->pwr_down_mask |= (1 << chan->channel);
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index a2f820997afc..37fd0b65a014 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
return ret;
regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+ if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
return -EINVAL;
*val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
if (ret)
break;
- pos = min(max(ilog2(pos), 3), 10) - 3;
+ /* Powers of 2, except for a gap between 16 and 64 */
+ pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
pos);
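
The reworked expression above maps the AVGPOSFILT filter strengths, powers of two from 8 to 2048 with the 16-to-64 gap the comment mentions, onto consecutive register codes 0..7; the `- (pos >= 32 ? 4 : 3)` term absorbs the gap. A runnable table of the mapping, with the value set inferred from the comment rather than spelled out in the diff:

```c
#include <stdio.h>

static int ilog2u(unsigned int v)	/* integer log2, like the kernel's ilog2() */
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Filter strengths inferred from the comment: powers of two with a
	 * gap between 16 and 64. */
	unsigned int vals[] = { 8, 16, 64, 128, 256, 512, 1024, 2048 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		unsigned int pos = vals[i];
		int code = clampi(ilog2u(pos), 3, 11) - (pos >= 32 ? 4 : 3);

		printf("%4u -> code %d\n", pos, code);
	}
	return 0;
}
```
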
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 503fe54a0bb9..608ccb1d8bc8 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
if (ret < 0)
return ret;
+ /*
+ * Give the mlx90632 some time to reset properly before sending a new
+ * I2C command. If this is not done, the following I2C command(s) will
+ * not be accepted.
+ */
+ usleep_range(150, 200);
+
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
(MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 7f70e5a7de10..97a77ea8d3c9 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -131,8 +131,10 @@ static ssize_t default_roce_mode_store(struct config_item *item,
return ret;
gid_type = ib_cache_gid_parse_type_str(buf);
- if (gid_type < 0)
+ if (gid_type < 0) {
+ cma_configfs_params_put(cma_dev);
return -EINVAL;
+ }
ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index e0a41c867002..ff1551b3cf61 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -254,6 +254,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
} else {
ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
&rt->next_id, GFP_KERNEL);
+ ret = (ret < 0) ? ret : 0;
}
out:
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 7dab9a27a145..da2512c30ffd 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -95,8 +95,6 @@ struct ucma_context {
u64 uid;
struct list_head list;
- /* sync between removal event and id destroy, protected by file mut */
- int destroying;
struct work_struct close_work;
};
@@ -122,7 +120,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);
static const struct file_operations ucma_fops;
-static int __destroy_id(struct ucma_context *ctx);
+static int ucma_destroy_private_ctx(struct ucma_context *ctx);
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
@@ -179,19 +177,14 @@ static void ucma_close_id(struct work_struct *work)
/* once all inflight tasks are finished, we close all underlying
* resources. The context is still alive till its explicit destroying
- * by its creator.
+ * by its creator. This puts back the xarray's reference.
*/
ucma_put_ctx(ctx);
wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the id. */
rdma_destroy_id(ctx->cm_id);
- /*
- * At this point ctx->ref is zero so the only place the ctx can be is in
- * a uevent or in __destroy_id(). Since the former doesn't touch
- * ctx->cm_id and the latter sync cancels this, there is no races with
- * this store.
- */
+ /* Reading the cm_id without holding a positive ref is not allowed */
ctx->cm_id = NULL;
}
@@ -204,7 +197,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return NULL;
INIT_WORK(&ctx->close_work, ucma_close_id);
- refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
@@ -218,6 +210,13 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return ctx;
}
+static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
+ struct rdma_cm_id *cm_id)
+{
+ refcount_set(&ctx->ref, 1);
+ ctx->cm_id = cm_id;
+}
+
static void ucma_finish_ctx(struct ucma_context *ctx)
{
lockdep_assert_held(&ctx->file->mut);
@@ -303,7 +302,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
ctx = ucma_alloc_ctx(listen_ctx->file);
if (!ctx)
goto err_backlog;
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
uevent = ucma_create_uevent(listen_ctx, event);
if (!uevent)
@@ -321,8 +320,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
return 0;
err_alloc:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
err_backlog:
atomic_inc(&listen_ctx->backlog);
/* Returning error causes the new ID to be destroyed */
@@ -356,8 +354,12 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
wake_up_interruptible(&ctx->file->poll_wait);
}
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
- queue_work(system_unbound_wq, &ctx->close_work);
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+ xa_lock(&ctx_table);
+ if (xa_load(&ctx_table, ctx->id) == ctx)
+ queue_work(system_unbound_wq, &ctx->close_work);
+ xa_unlock(&ctx_table);
+ }
return 0;
}
@@ -461,13 +463,12 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
ret = PTR_ERR(cm_id);
goto err1;
}
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
resp.id = ctx->id;
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp))) {
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ ucma_destroy_private_ctx(ctx);
return -EFAULT;
}
@@ -477,8 +478,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
return 0;
err1:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
return ret;
}
@@ -516,68 +516,73 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
rdma_unlock_handler(mc->ctx->cm_id);
}
-/*
- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
- * this point, no new events will be reported from the hardware. However, we
- * still need to cleanup the UCMA context for this ID. Specifically, there
- * might be events that have not yet been consumed by the user space software.
- * mutex. After that we release them as needed.
- */
-static int ucma_free_ctx(struct ucma_context *ctx)
+static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
{
int events_reported;
struct ucma_event *uevent, *tmp;
LIST_HEAD(list);
- ucma_cleanup_multicast(ctx);
-
- /* Cleanup events not yet reported to the user. */
+ /* Cleanup events not yet reported to the user. */
mutex_lock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
- if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
+ if (uevent->ctx != ctx)
+ continue;
+
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+ xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
+ uevent->conn_req_ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) == uevent->conn_req_ctx) {
list_move_tail(&uevent->list, &list);
+ continue;
+ }
+ list_del(&uevent->list);
+ kfree(uevent);
}
list_del(&ctx->list);
events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
/*
- * If this was a listening ID then any connections spawned from it
- * that have not been delivered to userspace are cleaned up too.
- * Must be done outside any locks.
+ * If this was a listening ID then any connections spawned from it that
+ * have not been delivered to userspace are cleaned up too. Must be done
+ * outside any locks.
*/
list_for_each_entry_safe(uevent, tmp, &list, list) {
- list_del(&uevent->list);
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
- uevent->conn_req_ctx != ctx)
- __destroy_id(uevent->conn_req_ctx);
+ ucma_destroy_private_ctx(uevent->conn_req_ctx);
kfree(uevent);
}
-
- mutex_destroy(&ctx->mutex);
- kfree(ctx);
return events_reported;
}
-static int __destroy_id(struct ucma_context *ctx)
+/*
+ * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id
+ * slot (ie the ctx is not public to the user). This is either because:
+ * - ucma_finish_ctx() hasn't been called
+ * - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
+ */
+static int ucma_destroy_private_ctx(struct ucma_context *ctx)
{
+ int events_reported;
+
/*
- * If the refcount is already 0 then ucma_close_id() has already
- * destroyed the cm_id, otherwise holding the refcount keeps cm_id
- * valid. Prevent queue_work() from being called.
+ * Destroy the underlying cm_id. New work queuing is prevented now by
+ * the removal from the xarray. Once the work is canceled the ref will
+ * either be 0 because the work ran to completion and consumed the ref
+ * from the xarray, or it will be positive because we still have the
+ * ref from the xarray. It can also be 0 in cases where cm_id was
+ * never set.
*/
- if (refcount_inc_not_zero(&ctx->ref)) {
- rdma_lock_handler(ctx->cm_id);
- ctx->destroying = 1;
- rdma_unlock_handler(ctx->cm_id);
- ucma_put_ctx(ctx);
- }
-
cancel_work_sync(&ctx->close_work);
- /* At this point it's guaranteed that there is no inflight closing task */
- if (ctx->cm_id)
+ if (refcount_read(&ctx->ref))
ucma_close_id(&ctx->close_work);
- return ucma_free_ctx(ctx);
+
+ events_reported = ucma_cleanup_ctx_events(ctx);
+ ucma_cleanup_multicast(ctx);
+
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
+ GFP_KERNEL) != NULL);
+ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
@@ -596,14 +601,17 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
xa_lock(&ctx_table);
ctx = _ucma_find_context(cmd.id, file);
- if (!IS_ERR(ctx))
- __xa_erase(&ctx_table, ctx->id);
+ if (!IS_ERR(ctx)) {
+ if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx)
+ ctx = ERR_PTR(-ENOENT);
+ }
xa_unlock(&ctx_table);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- resp.events_reported = __destroy_id(ctx);
+ resp.events_reported = ucma_destroy_private_ctx(ctx);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1777,15 +1785,16 @@ static int ucma_close(struct inode *inode, struct file *filp)
* prevented by this being a FD release function. The list_add_tail() in
* ucma_connect_event_handler() can run concurrently, however it only
* adds to the list *after* a listening ID. By only reading the first of
- * the list, and relying on __destroy_id() to block
+ * the list, and relying on ucma_destroy_private_ctx() to block
* ucma_connect_event_handler(), no additional locking is needed.
*/
while (!list_empty(&file->ctx_list)) {
struct ucma_context *ctx = list_first_entry(
&file->ctx_list, struct ucma_context, list);
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx);
+ ucma_destroy_private_ctx(ctx);
}
kfree(file);
return 0;
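
The xa_cmpxchg(..., ctx, XA_ZERO_ENTRY, ...) calls threaded through this patch implement a single-winner destroy: a zero entry reads back as NULL from xa_load(), hiding the ID from lookups, while still holding the index so concurrent destroyers cannot race on it. A hedged sketch of that claim step, where my_ctx and the teardown are hypothetical and only the xarray calls are real API:

```c
/* Sketch of the claim-to-destroy idiom used above. */
static int my_claim_for_destroy(struct xarray *xa, u32 id, struct my_ctx *ctx)
{
	/* Only one thread can swap the live pointer for XA_ZERO_ENTRY. */
	if (xa_cmpxchg(xa, id, ctx, XA_ZERO_ENTRY, GFP_KERNEL) != ctx)
		return -ENOENT;	/* someone else already claimed it */

	/* ... tear ctx down; xa_load(xa, id) now returns NULL ... */

	/* Finally release the reserved slot, mirroring the WARN_ON above. */
	xa_cmpxchg(xa, id, XA_ZERO_ENTRY, NULL, GFP_KERNEL);
	return 0;
}
```
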
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 7ca4112e3e8f..917338db7ac1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -135,7 +135,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
if (mask)
pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
- return rounddown_pow_of_two(pgsz_bitmap);
+ return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
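
The guard above covers the case where the alignment mask strips every candidate bit: rounddown_pow_of_two(0) is undefined (the underlying fls-based shift goes out of range), so an empty bitmap must short-circuit to 0, which callers treat as "no usable page size". A userspace analogue built the same fls-style way:

```c
#include <stdio.h>

/* fls-style round-down, undefined for 0 just like the kernel helper:
 * __builtin_clzl(0) is undefined behaviour, so callers must check first. */
static unsigned long rounddown_pow_of_two_ul(unsigned long x)
{
	return 1UL << (8 * sizeof(unsigned long) - 1 - __builtin_clzl(x));
}

int main(void)
{
	unsigned long bitmap = 0x3000;	/* e.g. 4 KiB and 8 KiB supported */
	unsigned long empty = 0;

	printf("best pgsz: %#lx\n",
	       bitmap ? rounddown_pow_of_two_ul(bitmap) : 0);
	printf("empty bitmap: %#lx\n",
	       empty ? rounddown_pow_of_two_ul(empty) : 0);
	return 0;
}
```
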
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index a7401398cb34..d109bb3822a5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 55d538625e36..ad8253245a85 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -532,7 +532,7 @@ struct hns_roce_qp_table {
struct hns_roce_hem_table sccc_table;
struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
- spinlock_t bank_lock;
+ struct mutex bank_mutex;
};
struct hns_roce_cq_table {
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d8e2fe5558d2..1116371adf74 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hr_qp->doorbell_qpn = 1;
} else {
- spin_lock(&qp_table->bank_lock);
+ mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to alloc QPN, ret = %d\n", ret);
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
return ret;
}
qp_table->bank[bankid].inuse++;
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
hr_qp->doorbell_qpn = (u32)num;
}
@@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
- spin_lock(&hr_dev->qp_table.bank_lock);
+ mutex_lock(&hr_dev->qp_table.bank_mutex);
hr_dev->qp_table.bank[bankid].inuse--;
- spin_unlock(&hr_dev->qp_table.bank_lock);
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
}
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -1371,6 +1371,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
unsigned int i;
mutex_init(&qp_table->scc_mutex);
+ mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
reserved_from_bot = hr_dev->caps.reserved_qps;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3bae9ba0ead8..aabdc07e4753 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3311,8 +3311,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
@@ -3324,8 +3323,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
@@ -3956,7 +3954,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
err = set_has_smi_cap(dev);
if (err)
- return err;
+ goto err_mp;
if (!mlx5_core_mp_enabled(mdev)) {
for (i = 1; i <= dev->num_ports; i++) {
@@ -4319,7 +4317,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
return err;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bc98bd950d99..3acb5c10b155 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -434,9 +434,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
- kfree(uctx->cntxt_pd);
uctx->cntxt_pd = NULL;
_ocrdma_dealloc_pd(dev, pd);
+ kfree(pd);
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index e59615a4c9d9..586b0e52ba7f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -214,7 +214,7 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
struct usnic_vnic_res *vnic_res;
int len;
- len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+ len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
qp_grp->ibqp.qp_num,
usnic_ib_qp_grp_state_to_string(qp_grp->state),
qp_grp->owner_pid,
@@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
res_chunk = qp_grp->res_chunk_list[i];
for (j = 0; j < res_chunk->cnt; j++) {
vnic_res = res_chunk->res[j];
- len += sysfs_emit_at(
- buf, len, "%s[%d] ",
+ len += sysfs_emit_at(buf, len, " %s[%d]",
usnic_vnic_res_type_to_str(vnic_res->type),
vnic_res->vnic_idx);
}
}
- len = sysfs_emit_at(buf, len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 38a37770c016..3705c6b8b223 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
}
usnic_uiom_free_dev_list(dev_list);
+ dev_list = NULL;
}
/* Try to find resources on an unused vf */
@@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
qp_grp_check:
if (IS_ERR_OR_NULL(qp_grp)) {
usnic_err("Failed to allocate qp_grp\n");
+ if (usnic_ib_share_vf)
+ usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
return qp_grp;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index c142f5e7f25f..de57f2fed743 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+ switch (type) {
+ case PVRDMA_NETWORK_ROCE_V1:
+ return RDMA_NETWORK_ROCE_V1;
+ case PVRDMA_NETWORK_IPV4:
+ return RDMA_NETWORK_IPV4;
+ case PVRDMA_NETWORK_IPV6:
+ return RDMA_NETWORK_IPV6;
+ default:
+ return RDMA_NETWORK_IPV6;
+ }
+}
+
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index a119ac3e103c..6aa40bd2fd52 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -367,7 +367,7 @@ retry:
wc->dlid_path_bits = cqe->dlid_path_bits;
wc->port_num = cqe->port_num;
wc->vendor_err = cqe->vendor_err;
- wc->network_hdr_type = cqe->network_hdr_type;
+ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
/* Update shared ring state */
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c4b06ced30a7..943914c2a50c 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -8,6 +8,7 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
+#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
@@ -153,9 +154,14 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct udphdr *udph;
struct net_device *ndev = skb->dev;
+ struct net_device *rdev = ndev;
struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ if (!rxe && is_vlan_dev(rdev)) {
+ rdev = vlan_dev_real_dev(ndev);
+ rxe = rxe_get_dev_from_net(rdev);
+ }
if (!rxe)
goto drop;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5a098083a9d2..c7e3b6a4af38 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 41dba7090c2a..c770951a909c 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
return -ENODEV;
}
/* Allow scaling to be disabled on a per-node basis */
- if (!dn || !of_device_is_available(dn)) {
+ if (!of_device_is_available(dn)) {
dev_warn(dev, "Missing property %s, skip scaling %s\n",
adj->phandle_name, node->name);
+ of_node_put(dn);
return 0;
}
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index ba43a15aefec..d7768d3c6d8a 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
#include <dt-bindings/interconnect/imx8mq.h>
#include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
.remove = imx8mq_icc_remove,
.driver = {
.name = "imx8mq-interconnect",
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index a8f93ba265f8..b3fb5b02bcf1 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+ tristate
+ default INTERCONNECT_QCOM
+ depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+ depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+ depends on OF || COMPILE_TEST
+ help
+ Compile-testing RPMH drivers is possible on other platforms,
+ but in order to avoid link failures, drivers must not be built-in
+ when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules.
+
config INTERCONNECT_QCOM_RPMH
tristate
config INTERCONNECT_QCOM_SC7180
tristate "Qualcomm SC7180 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
config INTERCONNECT_QCOM_SM8250
tristate "Qualcomm SM8250 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index f54cd79b43e4..6a1f7048dacc 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1973,8 +1973,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return r;
}
- iommu->int_enabled = true;
-
return 0;
}
@@ -2169,6 +2167,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
if (ret)
return ret;
+ iommu->int_enabled = true;
enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 7e2c445a1fae..f0adbc48fd17 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
struct amd_iommu *iommu;
int devid = -1;
+ if (!amd_iommu_irq_remap)
+ return 0;
+
if (x86_fwspec_is_ioapic(fwspec))
devid = get_ioapic_devid(fwspec->param[0]);
else if (x86_fwspec_is_hpet(fwspec))
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5dff7ffbef11..bcda17012aee 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -196,6 +196,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+ arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
}
@@ -323,7 +325,9 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
}
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
{ .compatible = "qcom,sc7180-smmu-500" },
+ { .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f0305e6aac1b..4078358ed66e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -863,33 +863,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;
- /*
- * The Intel graphic driver is used to assume that the returned
- * sg list is not combound. This blocks the efforts of converting
- * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
- * device driver work and should be removed once it's fixed in i915
- * driver.
- */
- if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
- to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
- (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- for_each_sg(sg, s, nents, i) {
- unsigned int s_iova_off = sg_dma_address(s);
- unsigned int s_length = sg_dma_len(s);
- unsigned int s_iova_len = s->length;
-
- s->offset += s_iova_off;
- s->length = s_length;
- sg_dma_address(s) = dma_addr + s_iova_off;
- sg_dma_len(s) = s_length;
- dma_addr += s_iova_len;
-
- pr_info_once("sg combining disabled due to i915 driver\n");
- }
-
- return nents;
- }
-
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
unsigned int s_iova_off = sg_dma_address(s);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b46dbfa6d0ed..004feaed3c72 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
int mask = ilog2(__roundup_pow_of_two(npages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
- if (WARN_ON_ONCE(!ALIGN(addr, align)))
- addr &= ~(align - 1);
+ if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr = ALIGN_DOWN(addr, align);
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
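
The qi_flush_piotlb() fix above replaces a broken alignment test: the kernel's ALIGN() rounds up, so !ALIGN(addr, align) is true only for addr == 0 and a misaligned address was never detected or corrected. IS_ALIGNED() tests the low bits and ALIGN_DOWN() rounds down. A stand-alone demonstration, with the macros re-implemented for illustration:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-ins for the kernel macros, for illustration only. */
	#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
	#define ALIGN_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))
	#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

	int main(void)
	{
		uint64_t addr = 0x12345, align = 0x1000;

		/* Old test: only fires for addr == 0. */
		printf("old test fires: %d\n", !ALIGN_UP(addr, align));   /* 0 */
		printf("new test fires: %d\n", !IS_ALIGNED(addr, align)); /* 1 */
		printf("corrected: 0x%llx\n",
		       (unsigned long long)ALIGN_DOWN(addr, align));      /* 0x12000 */
		return 0;
	}
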
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 788119c5b021..f665322a0991 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -38,7 +38,6 @@
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
-#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
@@ -719,6 +718,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
+static void domain_update_iotlb(struct dmar_domain *domain);
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@@ -744,6 +745,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+ domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1464,17 +1467,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
-
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
-
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
+
+ if (!has_iotlb_device) {
+ struct subdev_domain_info *sinfo;
+
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ if (info && info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
}
domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1563,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
#endif
}
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+ u64 addr, unsigned int mask)
+{
+ u16 sid, qdep;
+
+ if (!info || !info->ats_enabled)
+ return;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
+}
+
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;
+ struct subdev_domain_info *sinfo;
if (!domain->has_iotlb_device)
return;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
+ list_for_each_entry(info, &domain->devices, link)
+ __iommu_flush_dev_iotlb(info, addr, mask);
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ __iommu_flush_dev_iotlb(info, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -1877,6 +1897,7 @@ static struct dmar_domain *alloc_domain(int flags)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->subdevices);
return domain;
}
@@ -2547,7 +2568,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->iommu = iommu;
info->pasid_table = NULL;
info->auxd_enabled = 0;
- INIT_LIST_HEAD(&info->auxiliary_domains);
+ INIT_LIST_HEAD(&info->subdevices);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4475,33 +4496,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
domain->type == IOMMU_DOMAIN_UNMANAGED;
}
-static void auxiliary_link_device(struct dmar_domain *domain,
- struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+ struct subdev_domain_info *sinfo;
+
+ if (!list_empty(&domain->subdevices)) {
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ if (sinfo->pdev == dev)
+ return sinfo;
+ }
+ }
+
+ return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
- return;
+ return -EINVAL;
- domain->auxd_refcnt++;
- list_add(&domain->auxd, &info->auxiliary_domains);
+ if (!sinfo) {
+ sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ if (!sinfo)
+ return -ENOMEM;
+ sinfo->domain = domain;
+ sinfo->pdev = dev;
+ list_add(&sinfo->link_phys, &info->subdevices);
+ list_add(&sinfo->link_domain, &domain->subdevices);
+ }
+
+ return ++sinfo->users;
}
-static void auxiliary_unlink_device(struct dmar_domain *domain,
- struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+ int ret;
assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
+ if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+ return -EINVAL;
- list_del(&domain->auxd);
- domain->auxd_refcnt--;
+ ret = --sinfo->users;
+ if (!ret) {
+ list_del(&sinfo->link_phys);
+ list_del(&sinfo->link_domain);
+ kfree(sinfo);
+ }
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ return ret;
}
static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4579,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
}
spin_lock_irqsave(&device_domain_lock, flags);
+ ret = auxiliary_link_device(domain, dev);
+ if (ret <= 0)
+ goto link_failed;
+
+ /*
+ * Subdevices from the same physical device can be attached to the
+ * same domain. For such cases, only the first subdevice attachment
+ * needs to go through the full steps in this function. So if ret >
+ * 1, just goto out.
+ */
+ if (ret > 1)
+ goto out;
+
/*
* iommu->lock must be held to attach domain to iommu and setup the
* pasid entry for second level translation.
@@ -4548,10 +4610,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
domain->default_pasid);
if (ret)
goto table_failed;
- spin_unlock(&iommu->lock);
-
- auxiliary_link_device(domain, dev);
+ spin_unlock(&iommu->lock);
+out:
spin_unlock_irqrestore(&device_domain_lock, flags);
return 0;
@@ -4560,8 +4621,10 @@ table_failed:
domain_detach_iommu(domain, iommu);
attach_failed:
spin_unlock(&iommu->lock);
+ auxiliary_unlink_device(domain, dev);
+link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
ioasid_put(domain->default_pasid);
return ret;
@@ -4581,14 +4644,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
info = get_domain_info(dev);
iommu = info->iommu;
- auxiliary_unlink_device(domain, dev);
-
- spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
- domain_detach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
+ if (!auxiliary_unlink_device(domain, dev)) {
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev,
+ domain->default_pasid, false);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+ }
spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+ ioasid_put(domain->default_pasid);
}
static int prepare_domain_attach_device(struct iommu_domain *domain,
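
The intel/iommu.c refactor above replaces the per-domain auxd_refcnt with a per-(domain, device) subdev_domain_info that carries its own users count: auxiliary_link_device() returns the post-increment count so the caller can distinguish the first attach (full PASID setup) from repeat attaches (fast path via the ret > 1 goto), unlink returns zero only when the last user goes away, and ioasid_put() now keys off list_empty(&domain->subdevices). A condensed sketch of that lifecycle, with locking and most error handling elided; names follow the patch:

	/* Condensed, non-authoritative sketch of the counting above. */
	static int subdev_attach(struct dmar_domain *domain, struct device *dev)
	{
		struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);

		if (!sinfo) {
			sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
			if (!sinfo)
				return -ENOMEM;
			sinfo->domain = domain;
			sinfo->pdev = dev;
			list_add(&sinfo->link_phys, &get_domain_info(dev)->subdevices);
			list_add(&sinfo->link_domain, &domain->subdevices);
		}
		return ++sinfo->users;	/* 1 => caller performs full PASID setup */
	}

	static int subdev_detach(struct dmar_domain *domain, struct device *dev)
	{
		struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
		int users = --sinfo->users;

		if (!users) {	/* last user: caller tears down the PASID entry */
			list_del(&sinfo->link_phys);
			list_del(&sinfo->link_domain);
			kfree(sinfo);
		}
		return users;
	}
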
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aeffda92b10b..685200a5cff0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data = irq_domain_get_irq_data(domain, virq + i);
irq_cfg = irqd_cfg(irq_data);
if (!irq_data || !irq_cfg) {
+ if (!i)
+ kfree(data);
ret = -EINVAL;
goto out_free_data;
}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4fa248b98031..18a9f05df407 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -118,8 +118,10 @@ void intel_svm_check(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
-static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
- unsigned long address, unsigned long pages, int ih)
+static void __flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
{
struct qi_desc desc;
@@ -142,7 +144,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +168,23 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
+ }
+}
+
+static void intel_flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
+{
+ unsigned long shift = ilog2(__roundup_pow_of_two(pages));
+ unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
+ unsigned long start = ALIGN_DOWN(address, align);
+ unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
+
+ while (start < end) {
+ __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
+ start += align;
}
}
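
The new intel_flush_svm_range_dev() wrapper above splits an arbitrary range into chunks the invalidation descriptor can express: the hardware takes a single power-of-two size, so the range is widened to an aligned power-of-two stride and flushed one stride at a time. A stand-alone arithmetic check, with ilog2() and roundup_pow_of_two() re-implemented for illustration:

	#include <stdio.h>

	#define VTD_PAGE_SHIFT 12

	static unsigned long my_roundup_pow_of_two(unsigned long n)
	{
		unsigned long p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	static unsigned long my_ilog2(unsigned long n)
	{
		unsigned long l = 0;

		while (n >>= 1)
			l++;
		return l;
	}

	int main(void)
	{
		unsigned long address = 0x7f0000801000UL, pages = 5;
		unsigned long shift = my_ilog2(my_roundup_pow_of_two(pages)); /* 3 */
		unsigned long align = 1UL << (VTD_PAGE_SHIFT + shift);   /* 32 KiB */
		unsigned long start = address & ~(align - 1);
		unsigned long end = (address + (pages << VTD_PAGE_SHIFT) + align - 1)
				    & ~(align - 1);

		/* Each iteration flushes one aligned power-of-two chunk. */
		for (; start < end; start += align)
			printf("flush %#lx + %lu pages\n",
			       start, align >> VTD_PAGE_SHIFT);
		return 0;
	}
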
@@ -211,7 +229,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@@ -281,6 +299,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -363,6 +382,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +401,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
@@ -486,6 +506,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int pasid_max;
int ret;
@@ -546,6 +567,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
+ sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@@ -575,7 +597,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
- svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
@@ -605,14 +626,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +653,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
kfree(sdev);
goto out;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 4bb3293ae4d7..d20b8b333d30 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -358,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
* @iovad: - iova domain in question.
* @pfn: - page frame number
* This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
*/
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
@@ -601,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(queue_iova);
/**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
*/
@@ -712,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
/**
* copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
 * @to: - destination domain where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
* other.
*/
void
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 94920a51c628..b147f22a78f4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_PRUSS_INTC
- tristate "TI PRU-ICSS Interrupt Controller"
- depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ tristate
+ depends on TI_PRUSS
+ default TI_PRUSS
select IRQ_DOMAIN
help
This enables support for the PRU-ICSS Local Interrupt Controller
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 5f5eb8877c41..25c9a9c06e41 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
{
int cpu = smp_processor_id();
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
.name = "IPI",
.irq_mask = bcm2836_arm_irqchip_dummy_op,
.irq_unmask = bcm2836_arm_irqchip_dummy_op,
- .irq_eoi = bcm2836_arm_irqchip_ipi_eoi,
+ .irq_ack = bcm2836_arm_irqchip_ipi_ack,
.ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask,
};
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 9ed1bc473663..09b91b81851c 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
-int __init liointc_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
{
struct irq_chip_generic *gc;
struct irq_domain *domain;
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 95d4fd8f7a96..0bbb0b2d0dd5 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+
+ if (ret)
+ return ret;
+
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
if (ret)
return ret;
diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c
index 0aa50d025ef6..fbb354413ffa 100644
--- a/drivers/irqchip/irq-sl28cpld.c
+++ b/drivers/irqchip/irq-sl28cpld.c
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
irqchip->chip.num_regs = 1;
irqchip->chip.status_base = base + INTC_IP;
irqchip->chip.mask_base = base + INTC_IE;
- irqchip->chip.mask_invert = true,
+ irqchip->chip.mask_invert = true;
irqchip->chip.ack_base = base + INTC_IP;
return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index 26cf0ac9c4ad..c9a53c222472 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
+ select BITREVERSE
help
Enable support for digital audio processing capability.
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 8f39f9ba5c80..4c2ce210c123 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -19,6 +19,7 @@ if NVM
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
+ select CRC32
help
Allows an open-channel SSD to be exposed as a block device to the
host. The target assumes the device exposes raw flash and must be
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c1bcac71008c..28ddcaa5358b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
ret = nvm_submit_io_sync_raw(dev, &rqd);
+ __free_page(page);
if (ret)
return ret;
- __free_page(page);
-
return rqd.error;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b7e2d9666614..9e44c09f6410 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -605,6 +605,7 @@ config DM_INTEGRITY
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
+ select CRYPTO_SKCIPHER
select ASYNC_XOR
help
This device-mapper target emulates a block device that has
@@ -622,6 +623,7 @@ config DM_ZONED
tristate "Drive-managed zoned block device target support"
depends on BLK_DEV_DM
depends on BLK_DEV_ZONED
+ select CRC32
help
This device-mapper target takes a host-managed or host-aware zoned
block device and exposes most of its capacity as a regular block
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6469223f0b77..d636b7b2d070 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
- {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index a1653c478041..84fc2c0f0101 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
-#define BCH_FEATURE_COMPAT_SUUP 0
-#define BCH_FEATURE_RO_COMPAT_SUUP 0
-#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP 0
+#define BCH_FEATURE_RO_COMPAT_SUPP 0
+#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
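
The renamed *_SUPP masks feed the three bch_has_unknown_*_features() helpers: any set bit outside the supported mask means the on-disk format is newer than the driver. In this patch read_super() refuses the device for all three classes, but the usual ext2/4-style convention is graded, as sketched below with illustrative names:

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative policy only; the bcache patch above refuses on any
	 * unknown bit, while the conventional semantics are graded: */
	enum mount_mode { MOUNT_RW, MOUNT_RO, MOUNT_REFUSE };

	static enum mount_mode feature_policy(uint64_t compat, uint64_t ro_compat,
					      uint64_t incompat,
					      uint64_t supp_compat,
					      uint64_t supp_ro,
					      uint64_t supp_incompat)
	{
		if (incompat & ~supp_incompat)
			return MOUNT_REFUSE;	/* cannot even read safely */
		if (ro_compat & ~supp_ro)
			return MOUNT_RO;	/* reads fine, writes would corrupt */
		(void)(compat & ~supp_compat);	/* unknown compat bits are harmless */
		return MOUNT_RW;
	}
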
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a4752ac410dc..2047a9cccdb5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
- if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
- bch_has_feature_large_bucket(sb))
- bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+ * The bcache tools ensure this overflow won't
+ * happen; an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
return bucket_size;
}
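
get_bucket_size() above switches from the obsolete split encoding (low 16 bits in bucket_size, high bits in bucket_size_hi) to a logarithmic one where the on-disk field stores the exponent, with a guard against shifting past the width of unsigned int. A quick arithmetic illustration of both encodings:

	#include <stdio.h>

	int main(void)
	{
		unsigned int order = 13;	/* on-disk value, new feature */
		unsigned int max = sizeof(unsigned int) * 8 - 1;

		if (order > max)	/* 1 << order would overflow */
			fprintf(stderr, "Bucket size (1 << %u) overflows\n", order);
		else
			printf("log layout: %u sectors\n", 1U << order); /* 8192 */

		/* Obsolete layout, for comparison: 16 low + 16 high bits. */
		unsigned int lo = 0x2000, hi = 0x0001;
		printf("obsolete layout: %u sectors\n", lo + (hi << 16)); /* 73728 */
		return 0;
	}
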
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
+
err = read_super_common(sb, bdev, s);
if (err)
goto err;
@@ -1302,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
+ if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(dc->disk.disk, 1);
+ }
+
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1524,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_link(d, c, "volume");
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(d->disk, 1);
+ }
+
return 0;
err:
kobject_put(&d->kobj);
@@ -2083,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+ pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
list_for_each_entry_safe(dc, t, &uncached_devices, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -2644,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
}
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
char *set_uuid = c->set_uuid;
if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9c1a86bde658..fce4cbf9529d 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+ return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
return b->block;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 5f9f9b3a226d..5a55617a08e6 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error);
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
- if (!ctx->r.req)
- ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req) {
+ ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req)
+ return -ENOMEM;
+ }
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+ return 0;
}
-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
- if (!ctx->r.req_aead)
- ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req_aead) {
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req_aead)
+ return -ENOMEM;
+ }
aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+ return 0;
}
-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
if (crypt_integrity_aead(cc))
- crypt_alloc_req_aead(cc, ctx);
+ return crypt_alloc_req_aead(cc, ctx);
else
- crypt_alloc_req_skcipher(cc, ctx);
+ return crypt_alloc_req_skcipher(cc, ctx);
}
static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
static blk_status_t crypt_convert(struct crypt_config *cc,
- struct convert_context *ctx, bool atomic)
+ struct convert_context *ctx, bool atomic, bool reset_pending)
{
unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
- atomic_set(&ctx->cc_pending, 1);
+ /*
+ * If reset_pending is set, we are dealing with the bio for the first time;
+ * otherwise we're continuing to work on the previous bio, so don't mess
+ * with the cc_pending counter.
+ */
+ if (reset_pending)
+ atomic_set(&ctx->cc_pending, 1);
while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
- crypt_alloc_req(cc, ctx);
+ r = crypt_alloc_req(cc, ctx);
+ if (r) {
+ complete(&ctx->restart);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
atomic_inc(&ctx->cc_pending);
if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* but the driver request queue is full, let's wait.
*/
case -EBUSY:
- wait_for_completion(&ctx->restart);
+ if (in_interrupt()) {
+ if (try_wait_for_completion(&ctx->restart)) {
+ /*
+ * we don't have to block to wait for completion,
+ * so proceed
+ */
+ } else {
+ /*
+ * we can't wait for completion without blocking
+ * exit and continue processing in a workqueue
+ */
+ ctx->r.req = NULL;
+ ctx->cc_sector += sector_step;
+ tag_offset++;
+ return BLK_STS_DEV_RESOURCE;
+ }
+ } else {
+ wait_for_completion(&ctx->restart);
+ }
reinit_completion(&ctx->restart);
fallthrough;
/*
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ bio_endio(io->base_bio);
+}
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
kfree(io->integrity_metadata);
base_bio->bi_status = error;
- bio_endio(base_bio);
+
+ /*
+ * If we are running this function from our tasklet,
+ * we can't call bio_endio() here, because it will call
+ * clone_endio() from dm.c, which in turn will
+ * free the current struct dm_crypt_io structure with
+ * our tasklet. In this case we need to delay bio_endio()
+ * execution to after the tasklet is done and dequeued.
+ */
+ if (tasklet_trylock(&io->tasklet)) {
+ tasklet_unlock(&io->tasklet);
+ bio_endio(base_bio);
+ return;
+ }
+
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
+ queue_work(cc->io_queue, &io->work);
}
/*
@@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
}
}
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ struct convert_context *ctx = &io->ctx;
+ int crypt_finished;
+ sector_t sector = io->sector;
+ blk_status_t r;
+
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+ /* Wait for completion signaled by kcryptd_async_done() */
+ wait_for_completion(&ctx->restart);
+ crypt_finished = 1;
+ }
+
+ /* Encryption was already finished, submit io now */
+ if (crypt_finished) {
+ kcryptd_crypt_write_io_submit(io, 0);
+ io->sector = sector;
+ }
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
- test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ * (TODO: is it actually possible to be in softirq in the write path?)
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ blk_status_t r;
+
+ wait_for_completion(&io->ctx.restart);
+ reinit_completion(&io->ctx.restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+
+ if (atomic_dec_and_test(&io->ctx.cc_pending))
+ kcryptd_crypt_read_done(io);
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);
r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
@@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
(bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
- if (in_irq()) {
- /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+ /*
+ * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+ * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+ * it is being executed with irqs disabled.
+ */
+ if (in_irq() || irqs_disabled()) {
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
@@ -3166,12 +3300,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd-%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1, devname);
else
- cc->crypt_queue = alloc_workqueue("kcryptd-%s",
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM |
- WQ_UNBOUND | WQ_SYSFS,
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 5a7a1b90e671..b64fede032dc 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -257,8 +257,9 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
- bool fix_padding;
bool discard;
+ bool fix_padding;
+ bool legacy_recalculate;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
return READ_ONCE(ic->failed);
}
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+ if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+ !ic->legacy_recalculate)
+ return true;
+ return false;
+}
+
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
unsigned j, unsigned char seq)
{
@@ -1379,12 +1388,52 @@ thorough_test:
#undef MAY_BE_HASH
}
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+ struct dm_io_request io_req;
+ struct dm_io_region io_reg;
+ struct dm_integrity_c *ic;
+ struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+ struct flush_request *fr = fr_;
+ if (unlikely(error != 0))
+ dm_integrity_io_error(fr->ic, "flusing disk cache", -EIO);
+ complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
+
+ struct flush_request fr;
+
+ if (!ic->meta_dev)
+ flush_data = false;
+ if (flush_data) {
+ fr.io_req.bi_op = REQ_OP_WRITE;
+ fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
+ fr.io_req.mem.type = DM_IO_KMEM;
+ fr.io_req.mem.ptr.addr = NULL;
+ fr.io_req.notify.fn = flush_notify;
+ fr.io_req.notify.context = &fr;
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
+ fr.io_reg.bdev = ic->dev->bdev;
+ fr.io_reg.sector = 0;
+ fr.io_reg.count = 0;
+ fr.ic = ic;
+ init_completion(&fr.comp);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ BUG_ON(r);
+ }
+
r = dm_bufio_write_dirty_buffers(ic->bufio);
if (unlikely(r))
dm_integrity_io_error(ic, "writing tags", r);
+
+ if (flush_data)
+ wait_for_completion(&fr.comp);
}
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@@ -2110,7 +2159,7 @@ offload_to_thread:
if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
integrity_metadata(&dio->work);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
@@ -2195,7 +2244,7 @@ static void integrity_commit(struct work_struct *w)
flushes = bio_list_get(&ic->flush_bio_list);
if (unlikely(ic->mode != 'J')) {
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
goto release_flush_bios;
}
@@ -2409,7 +2458,7 @@ skip_io:
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
static void integrity_writer(struct work_struct *w)
@@ -2451,7 +2500,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
{
int r;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
if (dm_integrity_failed(ic))
return;
@@ -2654,7 +2703,7 @@ static void bitmap_flush_work(struct work_struct *work)
unsigned long limit;
struct bio *bio;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
range.logical_sector = 0;
range.n_sectors = ic->provided_data_sectors;
@@ -2663,9 +2712,7 @@ static void bitmap_flush_work(struct work_struct *work)
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
- if (ic->meta_dev)
- blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
+ dm_integrity_flush_buffers(ic, true);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@@ -2934,11 +2981,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->meta_dev)
queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
if (ic->mode == 'B') {
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
#if 1
/* set to 0 to test bitmap replay code */
init_journal(ic, 0, ic->journal_sections, 0);
@@ -3102,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+ arg_count += ic->legacy_recalculate;
DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -3125,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
}
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
DMEMIT(" fix_padding");
+ if (ic->legacy_recalculate)
+ DMEMIT(" legacy_recalculate");
#define EMIT_ALG(a, n) \
do { \
@@ -3754,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 9, "Invalid number of feature args"},
+ {0, 16, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
@@ -3902,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
ic->fix_padding = true;
+ } else if (!strcmp(opt_string, "legacy_recalculate")) {
+ ic->legacy_recalculate = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -4197,6 +4249,20 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
+ } else {
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ ti->error = "Recalculate can only be specified with internal_hash";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+ dm_integrity_disable_recalculate(ic)) {
+ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+ r = -EOPNOTSUPP;
+ goto bad;
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 23c38777e8f6..cab12b2251ba 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3729,10 +3729,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
/*
- * RAID1 and RAID10 personalities require bio splitting,
- * RAID0/4/5/6 don't and process large discard bios properly.
+ * RAID0 and RAID10 personalities require bio splitting,
+ * RAID1/4/5/6 don't and process large discard bios properly.
*/
- if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+ if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
limits->discard_granularity = chunk_size_bytes;
limits->max_discard_sectors = rs->md.chunk_sectors;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4668b2cd98f4..11890db71f3f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,6 +141,11 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
+
+ /*
+ * Flush data after merge.
+ */
+ struct bio flush_bio;
};
/*
@@ -1121,6 +1126,17 @@ shut:
static void error_bios(struct bio *bio);
+static int flush_data(struct dm_snapshot *s)
+{
+ struct bio *flush_bio = &s->flush_bio;
+
+ bio_reset(flush_bio);
+ bio_set_dev(flush_bio, s->origin->bdev);
+ flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+ return submit_bio_wait(flush_bio);
+}
+
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
+ if (flush_data(s) < 0) {
+ DMERR("Flush after merge failed: shutting down merge");
+ goto shut;
+ }
+
if (s->store->type->commit_merge(s->store,
s->num_merging_chunks) < 0) {
DMERR("Write error in exception store: shutting down merge");
@@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
+ bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ bio_uninit(&s->flush_bio);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 188f41287f18..4acf2342f7ad 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -363,14 +363,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
{
int r;
dev_t dev;
+ unsigned int major, minor;
+ char dummy;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
BUG_ON(!t);
- dev = dm_get_dev_t(path);
- if (!dev)
- return -ENODEV;
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EOVERFLOW;
+ } else {
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
+ }
dd = find_device(&t->devices, dev);
if (!dd) {
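
The dm-table change above parses "MAJOR:MINOR" strings directly instead of resolving them as paths. The "%u:%u%c" trick makes the match strict: the trailing %c only converts if there are leftover characters, so a return value of exactly 2 means the string was a clean major:minor pair and nothing else. A small userspace demonstration:

	#include <stdio.h>

	/* Returns 1 only for a clean MAJOR:MINOR string with no trailing
	 * characters, matching the sscanf() check added above. */
	static int parse_devt(const char *path, unsigned int *ma, unsigned int *mi)
	{
		char dummy;

		return sscanf(path, "%u:%u%c", ma, mi, &dummy) == 2;
	}

	int main(void)
	{
		unsigned int ma, mi;

		printf("%d\n", parse_devt("254:3", &ma, &mi));    /* 1 */
		printf("%d\n", parse_devt("254:3x", &ma, &mi));   /* 0: trailing char */
		printf("%d\n", parse_devt("/dev/sda", &ma, &mi)); /* 0: not numeric */
		return 0;
	}
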
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3c3c8b4cb42..7bac564f3faa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -562,7 +562,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
* subset of the parent bdev; require extra privileges.
*/
if (!capable(CAP_SYS_RAWIO)) {
- DMWARN_LIMIT(
+ DMDEBUG_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ca409428b4fc..04384452a7ab 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
* could wait for this and below md_handle_request could wait for those
* bios because of suspend check
*/
+ spin_lock_irq(&mddev->lock);
mddev->prev_flush_start = mddev->start_flush;
mddev->flush_bio = NULL;
+ spin_unlock_irq(&mddev->lock);
wake_up(&mddev->sb_wait);
if (bio->bi_iter.bi_size == 0) {
diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
index 3a947159b25a..ea6f8ee8161c 100644
--- a/drivers/media/cec/platform/Makefile
+++ b/drivers/media/cec/platform/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO) += meson/
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
obj-$(CONFIG_CEC_SECO) += seco/
obj-$(CONFIG_CEC_STI) += sti/
+obj-$(CONFIG_CEC_STM32) += stm32/
obj-$(CONFIG_CEC_TEGRA) += tegra/
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 96d3b2b2aa31..3f61f5863bf7 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return -EINVAL;
}
} else {
- length = (b->memory == VB2_MEMORY_USERPTR ||
- b->memory == VB2_MEMORY_DMABUF)
+ length = (b->memory == VB2_MEMORY_USERPTR)
? b->length : vb->planes[0].length;
if (b->bytesused > length)
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index eb7b6f01f623..58ca47e904a1 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -772,14 +772,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
switch (pll->bus_type) {
case CCS_PLL_BUS_TYPE_CSI2_DPHY:
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- op_sys_clk_freq_hz_sdr = pll->link_freq * 2
- * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
- 1 : pll->csi2.lanes);
- break;
case CCS_PLL_BUS_TYPE_CSI2_CPHY:
- op_sys_clk_freq_hz_sdr =
- pll->link_freq
+ op_sys_clk_freq_hz_sdr = pll->link_freq * 2
* (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
1 : pll->csi2.lanes);
break;
diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
index 9a6097b088bd..6555bd4b325a 100644
--- a/drivers/media/i2c/ccs/ccs-data.c
+++ b/drivers/media/i2c/ccs/ccs-data.c
@@ -152,7 +152,7 @@ static int ccs_data_parse_version(struct bin_container *bin,
vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
v->static_data_version_major[1];
vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
- v->static_data_version_major[1];
+ v->static_data_version_minor[1];
vv->date_year = ((u16)v->year[0] << 8) + v->year[1];
vv->date_month = v->month;
vv->date_day = v->day;
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 36e354ecf71e..6cada8a6e50c 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
if (!q->sensor)
return -ENODEV;
- freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
+ freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index bdd293faaad0..7233a7311757 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -349,8 +349,10 @@ static void venus_core_shutdown(struct platform_device *pdev)
{
struct venus_core *core = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(core->dev);
venus_shutdown(core);
venus_firmware_deinit(core);
+ pm_runtime_put_sync(core->dev);
}
static __maybe_unused int venus_runtime_suspend(struct device *dev)
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 98bff765b02e..e48d666f2c63 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -654,7 +654,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
out:
fwnode_handle_put(fwnode);
- return 0;
+ return ret;
}
static int rvin_parallel_init(struct rvin_dev *vin)
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index be8f2756a444..1524dc0fc566 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -320,7 +320,7 @@ again:
data->body);
spin_lock(&data->keylock);
if (scancode) {
- delay = nsecs_to_jiffies(dev->timeout) +
+ delay = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(100);
mod_timer(&data->rx_timeout, jiffies + delay);
} else {
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index a905113fef6e..0c6229592e13 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
- itdev->params.sample_period;
+ itdev->params.sample_period / 1000;
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 1d811e5ffb55..1fd62c1dac76 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol)
void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
- unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
+ unsigned int timeout = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
@@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
if (dev->keypressed) {
- dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
+ dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
@@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
+ dev->registered = true;
+
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
@@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev)
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- dev->registered = true;
-
/*
 * once the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
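
The media/rc hunks above and below follow a tree-wide change of dev->timeout from nanoseconds to microseconds; every nsecs_to_jiffies(dev->timeout) had to become usecs_to_jiffies(...), otherwise the computed delay shrinks by a factor of 1000 and keyup/repeat timers fire far too early. A quick magnitude check, assuming HZ=250 for illustration:

	#include <stdio.h>

	#define HZ 250	/* a common kernel tick rate, assumed for this demo */

	int main(void)
	{
		unsigned long timeout_us = 125000;	/* 125 ms default timeout */

		/* correct: microseconds -> jiffies */
		printf("usecs: %lu jiffies\n", timeout_us * HZ / 1000000UL);    /* 31 */
		/* stale conversion treating the value as nanoseconds */
		printf("nsecs: %lu jiffies\n", timeout_us * HZ / 1000000000UL); /* 0 */
		return 0;
	}
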
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 8cc28c92d05d..96ae0294ac10 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
mod_timer(&serial_ir.timeout_timer,
- jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+ jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
ir_raw_event_handle(serial_ir.rcdev);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 78007dba4677..133d20e40f82 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -442,7 +442,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div)
{
struct v4l2_ctrl *ctrl;
@@ -473,4 +473,4 @@ s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 2aa6648fa41f..5a491d2cd1ae 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1512,6 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
+ u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1577,7 +1578,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
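A standalone sketch of the probe-time ASPM inference added above: ASPM is treated as enabled unless both force-control bits are set. The mask values here are assumptions for illustration; the real FORCE_ASPM_CTL* definitions live in the rtsx headers.

#include <stdbool.h>
#include <stdio.h>

#define FORCE_ASPM_CTL0 0x10 /* assumed mask value */
#define FORCE_ASPM_CTL1 0x20 /* assumed mask value */

int main(void)
{
	unsigned int val;

	for (val = 0; val < 0x40; val += 0x10) {
		bool enabled = !((val & FORCE_ASPM_CTL0) &&
				 (val & FORCE_ASPM_CTL1));

		printf("ASPM_FORCE_CTL=0x%02x -> aspm_enabled=%d\n",
		       val, enabled);
	}
	return 0;
}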
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index beb482310a58..b2b3d2b0f808 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -472,8 +472,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
- if (!cs)
+ if (!cs) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
return -ENOMEM;
+ }
cs->ctx = ctx;
cs->submitted = false;
@@ -486,6 +489,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs;
}
@@ -513,6 +518,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
if (!cs->jobs_in_queue_cnt) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_fence;
}
@@ -562,7 +569,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
- /* Make sure we don't have leftovers in the H/W queues mirror list */
+ /* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
@@ -764,11 +771,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
struct hl_cs_chunk **cs_chunk_array,
- void __user *chunks, u32 num_chunks)
+ void __user *chunks, u32 num_chunks,
+ struct hl_ctx *ctx)
{
u32 size_to_copy;
if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS);
@@ -777,11 +787,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
- if (!*cs_chunk_array)
+ if (!*cs_chunk_array) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array);
return -EFAULT;
@@ -797,6 +812,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
+ struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
@@ -805,7 +821,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ hpriv->ctx);
if (rc)
goto out;
@@ -832,8 +849,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
goto free_cs_object;
}
@@ -841,8 +858,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
atomic64_inc(
- &hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ &ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
}
@@ -856,8 +873,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
- atomic64_inc(
- &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -891,7 +907,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -901,8 +917,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -1042,7 +1058,7 @@ out:
}
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1068,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
/* currently only one signal seq is supported */
if (signal_seq_arr_len != 1) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Wait for signal CS supports only one signal CS seq\n");
return -EINVAL;
@@ -1060,13 +1078,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
- if (!signal_seq_arr)
+ if (!signal_seq_arr) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Failed to copy signal seq array from user\n");
rc = -EFAULT;
@@ -1153,6 +1176,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
u32 q_idx, collective_engine_id = 0;
+ struct hl_cs_counters_atomic *cntr;
struct hl_fence *sig_fence = NULL;
struct hl_ctx *ctx = hpriv->ctx;
enum hl_queue_type q_type;
@@ -1160,9 +1184,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
u64 signal_seq;
int rc;
+ cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ ctx);
if (rc)
goto out;
@@ -1170,6 +1196,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
rc = -EINVAL;
@@ -1181,6 +1209,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
q_type = hw_queue_prop->type;
if (!hw_queue_prop->supports_sync_stream) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
@@ -1190,6 +1220,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
@@ -1200,12 +1232,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
if (rc)
goto free_cs_chunk_array;
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Failed to get signal CS with seq 0x%llx\n",
signal_seq);
@@ -1223,6 +1257,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
container_of(sig_fence, struct hl_cs_compl, base_fence);
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal CS\n",
signal_seq);
@@ -1270,8 +1306,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
cs, q_idx, collective_engine_id);
- else
+ else {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
+ }
if (rc)
goto free_cs_object;
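The repeated pattern across this file is worth naming: every early-exit path now bumps two counters, the per-context one and the device-wide aggregate, so both views stay consistent, and atomic64_inc() keeps the accounting lock-free. A hypothetical helper (no such function exists in the driver) that captures the idiom:

/* Count a validation drop both per-context and device-wide. */
static inline void cs_drop_validation(struct hl_ctx *ctx,
				      struct hl_cs_counters_atomic *cntr)
{
	atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
	atomic64_inc(&cntr->validation_drop_cnt);
}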
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 5871162a8442..69d04eca767f 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->disabled)
- status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (atomic_read(&hdev->in_reset))
+ if (atomic_read(&hdev->in_reset))
status = HL_DEVICE_STATUS_IN_RESET;
else if (hdev->needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
+ else if (hdev->disabled)
+ status = HL_DEVICE_STATUS_MALFUNCTION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -1037,7 +1037,7 @@ kill_processes:
if (hard_reset) {
/* Release kernel context */
- if (hl_ctx_put(hdev->kernel_ctx) == 1)
+ if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
@@ -1092,6 +1092,7 @@ kill_processes:
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
+ hl_mmu_fini(hdev);
goto out_err;
}
@@ -1103,6 +1104,7 @@ kill_processes:
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
+ hl_mmu_fini(hdev);
goto out_err;
}
}
@@ -1485,6 +1487,15 @@ void hl_device_fini(struct hl_device *hdev)
}
}
+ /* Disable PCI access from device F/W so it won't send us additional
+ * interrupts. We disable MSI/MSI-X at the halt_engines function and we
+ * can't have the F/W sending us interrupts after that. We need to
+ * disable the access here because if the device is marked as disabled, the
+ * message won't be sent. Also, in case of a heartbeat failure, the device CPU
+ * is marked as disabled, so this message won't be sent either
+ */
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
/* Mark device as disabled */
hdev->disabled = true;
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 0e1c629e9800..c9a12980218a 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -402,6 +402,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
}
counters->rx_throughput = result;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
/* Fetch PCI tx counter */
pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -414,6 +418,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
counters->tx_throughput = result;
/* Fetch PCI replay counter */
+ memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
@@ -627,25 +632,38 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
security_status = RREG32(cpu_security_boot_status_reg);
/* We read security status multiple times during boot:
- * 1. preboot - we check if fw security feature is supported
- * 2. boot cpu - we get boot cpu security status
- * 3. FW application - we get FW application security status
+ * 1. preboot - a. Check whether the security status bits are valid
+ * b. Check whether fw security is enabled
+ * c. Check whether hard reset is done by preboot
+ * 2. boot cpu - a. Fetch boot cpu security status
+ * b. Check whether hard reset is done by boot cpu
+ * 3. FW application - a. Fetch fw application security status
+ * b. Check whether hard reset is done by fw app
*
* Preboot:
* Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
* check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
*/
if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
- hdev->asic_prop.fw_security_status_valid = 1;
- prop->fw_security_disabled =
- !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+ prop->fw_security_status_valid = 1;
+
+ if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+ prop->fw_security_disabled = false;
+ else
+ prop->fw_security_disabled = true;
+
+ if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
} else {
- hdev->asic_prop.fw_security_status_valid = 0;
+ prop->fw_security_status_valid = 0;
prop->fw_security_disabled = true;
}
+ dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
dev_info(hdev->dev, "firmware-level security is %s\n",
- prop->fw_security_disabled ? "disabled" : "enabled");
+ prop->fw_security_disabled ? "disabled" : "enabled");
return 0;
}
@@ -655,6 +673,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 status;
int rc;
@@ -723,11 +742,22 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
/* Read U-Boot version now in case we will later fail */
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ /* Clear reset status since we need to read it again from boot CPU */
+ prop->hard_reset_done_by_fw = false;
+
/* Read boot_cpu security bits */
- if (hdev->asic_prop.fw_security_status_valid)
- hdev->asic_prop.fw_boot_cpu_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_boot_cpu_security_map =
RREG32(cpu_security_boot_status_reg);
+ if (prop->fw_boot_cpu_security_map &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
+ }
+
+ dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
if (rc) {
detect_cpu_boot_status(hdev, status);
rc = -EIO;
@@ -796,18 +826,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+ /* Clear reset status since we need to read it again from the FW app */
+ prop->hard_reset_done_by_fw = false;
+
/* Read FW application security bits */
- if (hdev->asic_prop.fw_security_status_valid) {
- hdev->asic_prop.fw_app_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_app_security_map =
RREG32(cpu_security_boot_status_reg);
- if (hdev->asic_prop.fw_app_security_map &
+ if (prop->fw_app_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- hdev->asic_prop.hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = true;
}
- dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
- hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+ dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 571eda6ef5ab..60e16dc4bcac 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -944,7 +944,7 @@ struct hl_asic_funcs {
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
u32 (*gen_wait_cb)(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -1000,6 +1000,7 @@ struct hl_va_range {
* @queue_full_drop_cnt: dropped due to queue full
* @device_in_reset_drop_cnt: dropped due to device in reset
* @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
*/
struct hl_cs_counters_atomic {
atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1008,7 @@ struct hl_cs_counters_atomic {
atomic64_t queue_full_drop_cnt;
atomic64_t device_in_reset_drop_cnt;
atomic64_t max_cs_in_flight_drop_cnt;
+ atomic64_t validation_drop_cnt;
};
/**
@@ -2180,6 +2182,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 6bbb6bca6860..032d114f01ea 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
+ .shutdown = hl_pci_remove,
.driver.pm = &hl_pm_ops,
.err_handler = &hl_pci_err_handler,
};
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 32e6af1db4e3..d25892d61ec9 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
&hw_idle.busy_engines_mask_ext, NULL);
+ hw_idle.busy_engines_mask =
+ lower_32_bits(hw_idle.busy_engines_mask_ext);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
@@ -335,6 +337,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
atomic64_read(&cntr->device_in_reset_drop_cnt);
cs_counters.total_max_cs_in_flight_drop_cnt =
atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+ cs_counters.total_validation_drop_cnt =
+ atomic64_read(&cntr->validation_drop_cnt);
if (hpriv->ctx) {
cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +356,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
cs_counters.ctx_max_cs_in_flight_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+ cs_counters.ctx_validation_drop_cnt =
+ atomic64_read(
+ &hpriv->ctx->cs_counters.validation_drop_cnt);
}
return copy_to_user(out, &cs_counters,
@@ -406,7 +413,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
- struct hl_pll_frequency_info freq_info = {0};
+ struct hl_pll_frequency_info freq_info = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 7caf868d1585..76217258780a 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -418,8 +418,11 @@ static void init_signal_cs(struct hl_device *hdev,
"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+ /* we set an EB since we must make sure all operations are done
+ * when sending the signal
+ */
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
- cs_cmpl->hw_sob->sob_id, 0);
+ cs_cmpl->hw_sob->sob_id, 0, true);
kref_get(&hw_sob->kref);
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index cbe9da4e0211..5d4fbdcaefe3 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -886,8 +886,10 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr, i;
+ bool is_host_addr;
u32 page_size;
+ is_host_addr = !hl_is_dram_va(hdev, vaddr);
page_size = phys_pg_pack->page_size;
next_vaddr = vaddr;
@@ -900,9 +902,13 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
/*
* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
+ *
+ * In addition, when unmapping host memory we pass through
+ * the Linux kernel to unpin the pages and that takes a long
+ * time. Therefore, sleep every 32K pages to avoid soft lockup
*/
- if (hdev->pldm)
- usleep_range(500, 1000);
+ if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+ usleep_range(50, 200);
}
}
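A quick standalone check of the throttling condition added above: 0x7FFF is 2^15 - 1, so (i & 0x7FFF) == 0 is true exactly when the low 15 bits of the page index wrap, i.e. once every 32768 pages.

#include <stdio.h>

int main(void)
{
	unsigned long hits = 0, i;

	for (i = 0; i < 100000; i++)
		if ((i & 0x7FFF) == 0)
			hits++;

	printf("%lu sleeps over 100000 pages\n", hits); /* prints 4 */
	return 0;
}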
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index 33ae953d3a36..28a4638741d8 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -9,7 +9,7 @@
#include "habanalabs.h"
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
@@ -236,7 +236,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c
index 2ce6ea89d4fa..06d8a44dd5d4 100644
--- a/drivers/misc/habanalabs/common/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu_v1.c
@@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
{
/* MMU H/W fini was already done in device hw_fini() */
- kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ }
+
+ /* Make sure that if we arrive here again before init is called we
+ * won't cause a kernel panic. This can happen, for example, if we fail
+ * during the hard reset code at certain points
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
/**
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 923b2606e29f..b4725e6101f6 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
return 0;
- if (val & PCI_CONFIG_ELBI_STS_ERR) {
- dev_err(hdev->dev, "Error writing to ELBI\n");
+ if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
- }
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
dbi_offset = addr & 0xFFF;
- rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+ /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+ rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location
+ * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location
+ * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+ * in case the firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 1f1926607c5e..b328ddaa64ee 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
- [CPU_PLL] = mmPSOC_CPU_PLL_NR,
- [PCI_PLL] = mmPSOC_PCI_PLL_NR,
- [SRAM_PLL] = mmSRAM_W_PLL_NR,
- [HBM_PLL] = mmPSOC_HBM_PLL_NR,
- [NIC_PLL] = mmNIC0_PLL_NR,
- [DMA_PLL] = mmDMA_W_PLL_NR,
- [MESH_PLL] = mmMESH_W_PLL_NR,
- [MME_PLL] = mmPSOC_MME_PLL_NR,
- [TPC_PLL] = mmPSOC_TPC_PLL_NR,
- [IF_PLL] = mmIF_W_PLL_NR
-};
-
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
@@ -374,7 +361,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
@@ -667,12 +654,6 @@ static int gaudi_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -685,6 +666,12 @@ static int gaudi_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
return 0;
pci_fini:
@@ -703,93 +690,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
}
/**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- * outputs. if a certain output is not available a 0 will be set
*
*/
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
- enum gaudi_pll_index pll_index,
- u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
- pll_base_addr = gaudi_pll_base_addresses[pll_index];
- u16 freq = 0;
- int i, rc;
-
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
- rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
- if (rc)
- return rc;
- } else if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_disabled) {
/* Backward compatibility */
- nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
- nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
- od = RREG32(pll_base_addr + PLL_OD_OFFSET);
-
- for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
- div_fctr = RREG32(pll_base_addr +
- PLL_DIV_FACTOR_0_OFFSET + i * 4);
- div_sel = RREG32(pll_base_addr +
- PLL_DIV_SEL_0_OFFSET + i * 4);
+ div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ nr = RREG32(mmPSOC_CPU_PLL_NR);
+ nf = RREG32(mmPSOC_CPU_PLL_NF);
+ od = RREG32(mmPSOC_CPU_PLL_OD);
- if (div_sel == DIV_SEL_REF_CLK ||
+ if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- freq = PLL_REF_CLK;
- else
- freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) /
- ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- freq = pll_clk;
- else
- freq = pll_clk / (div_fctr + 1);
- } else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
- }
-
- pll_freq_arr[i] = freq;
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
}
} else {
- dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
- return -EIO;
- }
+ rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
- return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u16 pll_freq[HL_PLL_NUM_OUTPUTS];
- int rc;
+ if (rc)
+ return rc;
- rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
- if (rc)
- return rc;
+ freq = pll_freq_arr[2];
+ }
- prop->psoc_timestamp_frequency = pll_freq[2];
- prop->psoc_pci_pll_nr = 0;
- prop->psoc_pci_pll_nf = 0;
- prop->psoc_pci_pll_od = 0;
- prop->psoc_pci_pll_div_factor = 0;
+ prop->psoc_timestamp_frequency = freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
return 0;
}
@@ -884,11 +838,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
- int rc;
+ int rc, count = 5;
+again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc == -EINTR && count-- > 0) {
+ msleep(50);
+ goto again;
+ }
+
if (rc) {
- dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
@@ -1110,7 +1070,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, queue_id);
cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
- prop->collective_sob_id, cb_size);
+ prop->collective_sob_id, cb_size, false);
}
static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -2449,8 +2409,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_e2e(hdev);
gaudi_init_hbm_cred(hdev);
- hdev->asic_funcs->disable_clock_gating(hdev);
-
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3420,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
if (hdev->in_debug)
return;
+ if (!hdev->asic_prop.fw_security_disabled)
+ return;
+
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
@@ -3513,7 +3474,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
u32 qman_offset;
int i;
- if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ if (!hdev->asic_prop.fw_security_disabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3806,7 +3767,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
if (hdev->asic_prop.fw_security_disabled) {
/* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3808,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
return rc;
}
+ /* In case the clock gating was enabled in preboot we need to disable
+ * it here before touching the MME/TPC registers.
+ * There is no need to take the clock gating mutex because when this
+ * function runs, no other relevant code can run
+ */
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
/* SRAM scrambler must be initialized after CPU is running from HBM */
gaudi_init_scrambler_sram(hdev);
@@ -3885,7 +3853,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return 0;
@@ -3927,7 +3895,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
/* We don't know what the state of the CPU is, so make sure it is
* stopped by any means necessary
*/
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+ else
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
@@ -3971,11 +3942,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- }
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+ }
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -4027,7 +4002,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
@@ -7936,7 +7912,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
}
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
struct hl_cb *cb = (struct hl_cb *) data;
struct packet_msg_short *pkt;
@@ -7953,7 +7929,7 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
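Both gaudi_fetch_psoc_frequency() above and the analogous goya change later in this series use the same divider arithmetic. A standalone sketch with invented register values; PLL_REF_CLK is assumed here to be 50 MHz, which may differ from the real ASIC constant.

#include <stdio.h>

#define PLL_REF_CLK 50 /* MHz, assumed reference clock */

int main(void)
{
	unsigned int nr = 0, nf = 47, od = 1, div_fctr = 2; /* invented */
	unsigned int pll_clk, freq;

	pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
	freq = pll_clk / (div_fctr + 1); /* the DIV_SEL_DIVIDED_PLL case */

	printf("pll_clk=%u MHz, divided output=%u MHz\n", pll_clk, freq);
	return 0;
}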
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f2d91f4fcffe..a7ab2d7e57d4 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -105,13 +105,6 @@
#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
-#define PLL_NR_OFFSET 0
-#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
- mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
#define NUM_OF_SOB_IN_BLOCK \
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 2e3612e1ee28..88a09d42e111 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -9,6 +9,7 @@
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/misc/habanalabs.h>
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
@@ -874,7 +875,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 3e5eb9e3d7bd..63679a747d2c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -613,12 +613,6 @@ static int goya_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -631,6 +625,12 @@ static int goya_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +694,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 trace_freq = 0;
- u32 pll_clk = 0;
- u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
- u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
- u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
- u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
- u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
- if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- trace_freq = PLL_REF_CLK;
- else
- trace_freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- trace_freq = pll_clk;
- else
- trace_freq = pll_clk / (div_fctr + 1);
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
+
+ if (hdev->asic_prop.fw_security_disabled) {
+ div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ nr = RREG32(mmPSOC_PCI_PLL_NR);
+ nf = RREG32(mmPSOC_PCI_PLL_NF);
+ od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK ||
+ div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
+ }
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d", div_sel);
+ rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+ if (rc)
+ return;
+
+ freq = pll_freq_arr[1];
}
- prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
@@ -2704,7 +2719,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
@@ -5324,7 +5340,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
}
static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
return 0;
}
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e5801ecf0cb2..b637dfd69f6e 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -145,11 +145,15 @@
* implemented. This means that FW will
* perform hard reset procedure on
* receiving the halt-machine event.
- * Initialized in: linux
+ * Initialized in: preboot, u-boot, linux
*
* CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -171,6 +175,7 @@
#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -204,6 +209,8 @@ enum kmd_msg {
KMD_MSG_GOTO_WFE,
KMD_MSG_FIT_RDY,
KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
};
enum cpu_msg_status {
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 951b37da5e3c..41cab297d66e 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -55,12 +55,23 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_mem_or_io(pdev, 0);
- if (res && resource_type(res) == IORESOURCE_IO)
+ if (!res)
+ return -EINVAL;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
base = devm_ioport_map(dev, res->start, resource_size(res));
- else
+ if (!base)
+ return -ENOMEM;
+ break;
+ case IORESOURCE_MEM:
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ break;
+ default:
+ return -EINVAL;
+ }
atomic_notifier_chain_register(&panic_notifier_list,
&pvpanic_panic_nb);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index de7cb0369c30..002426e3cf76 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
"merging was advertised but not possible");
blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
- if (mmc_card_mmc(card))
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
+ WARN_ON(block_size != 512 && block_size != 4096);
+ }
blk_queue_logical_block_size(mq->queue, block_size);
/*
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index bbf3496f4495..f9780c65ebe9 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -314,11 +314,7 @@ err_clk:
static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
{
- int ret;
-
- ret = sdhci_pltfm_unregister(pdev);
- if (ret)
- dev_err(&pdev->dev, "failed to shutdown\n");
+ sdhci_pltfm_suspend(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 4b673792b5a4..d90020ed3622 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -16,6 +16,8 @@
#include "sdhci-pltfm.h"
+#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
+
/* DWCMSHC specific Mode Select value */
#define DWCMSHC_CTRL_HS400 0x7
@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /*
+ * Whether V4 mode is enabled or not, the ARGUMENT2 register is a 32-bit
+ * block count register which doesn't support the stuff bits of the
+ * CMD23 argument on the dwcmshc host controller.
+ */
+ if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+ host->flags &= ~SDHCI_AUTO_CMD23;
+ else
+ host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ dwcmshc_check_auto_cmd23(mmc, mrq);
+
+ sdhci_request(mmc, mrq);
+}
+
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ host->mmc_host_ops.request = dwcmshc_request;
+
err = sdhci_add_host(host);
if (err)
goto err_clk;
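A standalone sketch of the ARG2 stuff-bit test added above: GENMASK(31, 16) covers the upper halfword of the CMD23 argument, where the eMMC spec places flags such as reliable write and tag request. The specific flag used below is an assumed example.

#include <stdint.h>
#include <stdio.h>

#define ARG2_STUFF_MASK 0xFFFF0000u /* i.e. GENMASK(31, 16) */

int main(void)
{
	uint32_t plain = 8;		   /* block count only */
	uint32_t stuffed = (1u << 31) | 8; /* e.g. a reliable-write flag */

	printf("plain:   auto-CMD23 %s\n",
	       (plain & ARG2_STUFF_MASK) ? "off" : "on");
	printf("stuffed: auto-CMD23 %s\n",
	       (stuffed & ARG2_STUFF_MASK) ? "off" : "on");
	return 0;
}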
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index c67611fdaa8a..d19eef5f725f 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
/* Disable tuning request and auto-retuning again */
xenon_retune_setup(host);
- xenon_set_acg(host, true);
+ /*
+ * The ACG should be turned off at the early init time, in order
+ * to solve a possible issues with the 1.8V regulator stabilization.
+ * The feature is enabled in later stage.
+ */
+ xenon_set_acg(host, false);
xenon_set_sdclk_off_idle(host, sdhc_id, false);
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 5cdf05bcbf8f..3fa8c22d3f36 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1615,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
/* Extract interleaved payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
- nand_extract_bits(buf, step * eccsize, tmp_buf,
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
src_bit_off, eccsize * 8);
src_bit_off += eccsize * 8;
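A quick standalone illustration of the offset bug fixed above: nand_extract_bits() takes the destination offset in bits, so for chunk 'step' of eccsize bytes the correct destination offset is step * eccsize * 8, not step * eccsize.

#include <stdio.h>

int main(void)
{
	unsigned int eccsize = 512, step;

	for (step = 0; step < 3; step++)
		printf("chunk %u: wrong offset %u, correct bit offset %u\n",
		       step, step * eccsize, step * eccsize * 8);
	return 0;
}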
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index fdb112e8a90d..a304fda5d1fa 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -579,7 +579,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ebu_nand_controller *ebu_host;
struct nand_chip *nand;
- struct mtd_info *mtd = NULL;
+ struct mtd_info *mtd;
struct resource *res;
char *resname;
int ret;
@@ -647,12 +647,13 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->ebu + EBU_ADDR_SEL(cs));
nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+ mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
dev_err(ebu_host->dev, "NAND label property is mandatory\n");
return -EINVAL;
}
- mtd = nand_to_mtd(&ebu_host->chip);
mtd->dev.parent = dev;
ebu_host->dev = dev;
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index f2b9250c0ea8..0750121ac371 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2210,6 +2210,9 @@ static int ns_attach_chip(struct nand_chip *chip)
{
unsigned int eccsteps, eccbytes;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
if (!bch)
return 0;
@@ -2233,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
@@ -2273,8 +2274,6 @@ static int __init ns_init_module(void)
nsmtd = nand_to_mtd(chip);
nand_set_controller_data(chip, (void *)ns);
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index fbb9955f2467..2c3e65cb68f3 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -15,6 +15,7 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
@@ -1866,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
- if (section >= chip->ecc.steps)
+ if (section >= engine_conf->nsteps)
return -ERANGE;
/*
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- oobregion->offset = off + (section * (chip->ecc.bytes + 1));
- oobregion->length = chip->ecc.bytes;
+ oobregion->offset = off + (section * (engine_conf->code_size + 1));
+ oobregion->length = engine_conf->code_size;
return 0;
}
@@ -1885,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
if (section)
@@ -1895,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+ off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
if (off >= mtd->oobsize)
return -ERANGE;
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 8ea545bb924d..61d932c1b718 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -343,6 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
@@ -382,9 +383,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
- if (req->ooblen)
- memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
- req->ooblen);
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
return 0;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 260f9f46668b..1ebb4b943876 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -42,6 +42,7 @@ config BONDING
tristate "Bonding driver support"
depends on INET
depends on IPV6 || IPV6=n
+ depends on TLS || TLS_DEVICE=n
help
Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet
Channels together. This is called 'Etherchannel' by Cisco,
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index cf607ffcf358..81223f6bebcc 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -67,7 +67,7 @@ static void regdump(struct net_device *dev)
/* set up the address register */
count = 0;
arcnet_outb((count >> 8) | RDDATAflag | AUTOINCflag,
- ioaddr, com20020_REG_W_ADDR_HI);
+ ioaddr, COM20020_REG_W_ADDR_HI);
arcnet_outb(count & 0xff, ioaddr, COM20020_REG_W_ADDR_LO);
for (count = 0; count < 256 + 32; count++) {
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 85ebd2b7e446..1b8f59713fc7 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@@ -532,10 +532,12 @@ static void bareudp_setup(struct net_device *dev)
dev->netdev_ops = &bareudp_netdev_ops;
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hard_header_len = 0;
dev->addr_len = 0;
@@ -644,6 +646,14 @@ static int bareudp_link_config(struct net_device *dev,
return 0;
}
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
static int bareudp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -661,17 +671,13 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
err = bareudp_link_config(dev, tb);
if (err)
- return err;
+ goto err_unconfig;
return 0;
-}
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
- struct bareudp_dev *bareudp = netdev_priv(dev);
-
- list_del(&bareudp->next);
- unregister_netdevice_queue(dev, head);
+err_unconfig:
+ bareudp_dellink(dev, NULL);
+ return err;
}
static size_t bareudp_get_size(const struct net_device *dev)
@@ -722,7 +728,6 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
- LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
@@ -746,8 +751,7 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name,
return dev;
err:
- bareudp_dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
+ bareudp_dellink(dev, NULL);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);
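
With bareudp_dellink() moved above bareudp_newlink(), the error path can unwind a partially configured device through the same helper as normal link removal; passing a NULL list head makes unregister_netdevice_queue() unregister immediately, so the separate list_kill in bareudp_dev_create() becomes unnecessary. A standalone sketch of the shared-teardown pattern (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct toy_dev { struct toy_dev *next; int configured; };

static struct toy_dev *dev_list;

static void toy_register(struct toy_dev *d)
{
	d->next = dev_list;
	dev_list = d;
}

/* Single teardown helper, mirroring bareudp_dellink(): it both unlinks
 * the device and releases it, so error paths and normal removal share
 * one code path and cannot get out of sync.
 */
static void toy_dellink(struct toy_dev *d)
{
	struct toy_dev **p = &dev_list;

	while (*p && *p != d)
		p = &(*p)->next;
	if (*p)
		*p = d->next;
	free(d);
}

static struct toy_dev *toy_newlink(int cfg_ok)
{
	struct toy_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	toy_register(d);

	if (!cfg_ok)
		goto err_unconfig;	/* same shape as the hunk above */
	d->configured = 1;
	return d;

err_unconfig:
	toy_dellink(d);			/* one helper undoes everything */
	return NULL;
}

int main(void)
{
	if (!toy_newlink(0))
		printf("config failed, device unwound: list %s empty\n",
		       dev_list ? "NOT" : "is");
	return 0;
}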
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5fe5232cc3f3..74cbbb22470b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -83,6 +83,9 @@
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+#include <net/tls.h>
+#endif
#include "bonding_priv.h"
@@ -164,7 +167,7 @@ module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
"2 for layer 2+3, 3 for encap layer 2+3, "
- "4 for encap layer 3+4");
+ "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
@@ -301,6 +304,19 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
return dev_queue_xmit(skb);
}
+bool bond_sk_check(struct bonding *bond)
+{
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_8023AD:
+ case BOND_MODE_XOR:
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
/*---------------------------------- VLAN -----------------------------------*/
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -1212,6 +1228,13 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (bond_sk_check(bond))
+ features |= BOND_TLS_FEATURES;
+ else
+ features &= ~BOND_TLS_FEATURES;
+#endif
+
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
@@ -1434,6 +1457,8 @@ static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
return NETDEV_LAG_HASH_E23;
case BOND_XMIT_POLICY_ENCAP34:
return NETDEV_LAG_HASH_E34;
+ case BOND_XMIT_POLICY_VLAN_SRCMAC:
+ return NETDEV_LAG_HASH_VLAN_SRCMAC;
default:
return NETDEV_LAG_HASH_UNKNOWN;
}
@@ -1922,6 +1947,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_unregister;
}
+ bond_lower_state_changed(new_slave);
+
res = bond_sysfs_slave_add(new_slave);
if (res) {
slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
@@ -3494,6 +3521,27 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
return true;
}
+static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ u32 srcmac_vendor = 0, srcmac_dev = 0;
+ u16 vlan;
+ int i;
+
+ for (i = 0; i < 3; i++)
+ srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
+
+ for (i = 3; i < ETH_ALEN; i++)
+ srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
+
+ if (!skb_vlan_tag_present(skb))
+ return srcmac_vendor ^ srcmac_dev;
+
+ vlan = skb_vlan_tag_get(skb);
+
+ return vlan ^ srcmac_vendor ^ srcmac_dev;
+}
+
/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
struct flow_keys *fk)
@@ -3501,10 +3549,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
int noff, proto = -1;
- if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
+ switch (bond->params.xmit_policy) {
+ case BOND_XMIT_POLICY_ENCAP23:
+ case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
fk, NULL, 0, 0, 0, 0);
+ default:
+ break;
}
fk->ports.ports = 0;
@@ -3539,6 +3591,16 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
return true;
}
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+{
+ hash ^= (__force u32)flow_get_u32_dst(flow) ^
+ (__force u32)flow_get_u32_src(flow);
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+ /* discard lowest hash bit to deal with the common even ports pattern */
+ return hash >> 1;
+}
+
/**
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
@@ -3556,6 +3618,9 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return bond_vlan_srcmac_hash(skb);
+
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, &flow))
return bond_eth_hash(skb);
@@ -3569,12 +3634,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
else
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- hash ^= (__force u32)flow_get_u32_dst(&flow) ^
- (__force u32)flow_get_u32_src(&flow);
- hash ^= (hash >> 16);
- hash ^= (hash >> 8);
- return hash >> 1;
+ return bond_ip_hash(hash, &flow);
}
/*-------------------------- Device entry points ----------------------------*/
@@ -4547,6 +4608,95 @@ static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
return NULL;
}
+static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
+{
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (sk->sk_ipv6only ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
+ flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
+ break;
+ }
+ fallthrough;
+#endif
+ default: /* AF_INET */
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
+ flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
+ break;
+ }
+
+ flow->ports.src = inet_sk(sk)->inet_sport;
+ flow->ports.dst = inet_sk(sk)->inet_dport;
+}
+
+/**
+ * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
+ * @sk: socket to use for headers
+ *
+ * This function will extract the necessary fields from the socket and use
+ * them to generate a hash based on the LAYER34 xmit_policy.
+ * Assumes that sk is a TCP or UDP socket.
+ */
+static u32 bond_sk_hash_l34(struct sock *sk)
+{
+ struct flow_keys flow;
+ u32 hash;
+
+ bond_sk_to_flow(sk, &flow);
+
+ /* L4 */
+ memcpy(&hash, &flow.ports.ports, sizeof(hash));
+ /* L3 */
+ return bond_ip_hash(hash, &flow);
+}
+
+static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
+ struct sock *sk)
+{
+ struct bond_up_slave *slaves;
+ struct slave *slave;
+ unsigned int count;
+ u32 hash;
+
+ slaves = rcu_dereference(bond->usable_slaves);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (unlikely(!count))
+ return NULL;
+
+ hash = bond_sk_hash_l34(sk);
+ slave = slaves->arr[hash % count];
+
+ return slave->dev;
+}
+
+static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
+ struct sock *sk)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *lower = NULL;
+
+ rcu_read_lock();
+ if (bond_sk_check(bond))
+ lower = __bond_sk_get_lower_dev(bond, sk);
+ rcu_read_unlock();
+
+ return lower;
+}
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ return bond_tx_drop(dev, skb);
+}
+#endif
+
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
@@ -4555,6 +4705,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
!bond_slave_override(bond, skb))
return NETDEV_TX_OK;
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
+ return bond_tls_device_xmit(bond, skb, dev);
+#endif
+
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return bond_xmit_roundrobin(skb, dev);
@@ -4683,6 +4838,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_fix_features = bond_fix_features,
.ndo_features_check = passthru_features_check,
.ndo_get_xmit_slave = bond_xmit_get_slave,
+ .ndo_sk_get_lower_dev = bond_sk_get_lower_dev,
};
static const struct device_type bond_type = {
@@ -4754,6 +4910,10 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (bond_sk_check(bond))
+ bond_dev->features |= BOND_TLS_FEATURES;
+#endif
}
/* Destroy a bonding device.
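
The new vlan+srcmac policy hashes only the source MAC (split into its OUI and NIC-specific halves) and the VLAN tag, keeping each source's traffic pinned to one slave while still spreading sources across slaves. A userspace re-derivation of bond_vlan_srcmac_hash() above, fed a made-up sample address:

#include <stdio.h>
#include <stdint.h>

static uint32_t vlan_srcmac_hash(const uint8_t mac[6], int has_vlan,
				 uint16_t vlan)
{
	uint32_t vendor = 0, dev = 0;

	for (int i = 0; i < 3; i++)	/* OUI bytes */
		vendor = (vendor << 8) | mac[i];
	for (int i = 3; i < 6; i++)	/* NIC-specific bytes */
		dev = (dev << 8) | mac[i];

	if (!has_vlan)
		return vendor ^ dev;

	return vlan ^ vendor ^ dev;
}

int main(void)
{
	/* sample locally-administered address, values invented */
	const uint8_t mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

	printf("untagged: 0x%08x\n", vlan_srcmac_hash(mac, 0, 0));
	printf("vlan 100: 0x%08x\n", vlan_srcmac_hash(mac, 1, 100));
	return 0;
}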
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index a4e4e15f574d..77d7c38bd435 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -96,12 +96,13 @@ static const struct bond_opt_value bond_pps_tbl[] = {
};
static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
- { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
- { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
- { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
- { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
- { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
- { NULL, -1, 0},
+ { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
+ { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
+ { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
+ { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
+ { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
+ { "vlan+srcmac", BOND_XMIT_POLICY_VLAN_SRCMAC, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_arp_validate_tbl[] = {
@@ -745,17 +746,30 @@ const struct bond_option *bond_opt_get(unsigned int option)
return &bond_opts[option];
}
-static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
+static bool bond_set_xfrm_features(struct bonding *bond)
{
if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
- return;
+ return false;
+
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
+ bond->dev->wanted_features |= BOND_XFRM_FEATURES;
+ else
+ bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
+
+ return true;
+}
+
+static bool bond_set_tls_features(struct bonding *bond)
+{
+ if (!IS_ENABLED(CONFIG_TLS_DEVICE))
+ return false;
- if (mode == BOND_MODE_ACTIVEBACKUP)
- bond_dev->wanted_features |= BOND_XFRM_FEATURES;
+ if (bond_sk_check(bond))
+ bond->dev->wanted_features |= BOND_TLS_FEATURES;
else
- bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
+ bond->dev->wanted_features &= ~BOND_TLS_FEATURES;
- netdev_update_features(bond_dev);
+ return true;
}
static int bond_option_mode_set(struct bonding *bond,
@@ -780,13 +794,20 @@ static int bond_option_mode_set(struct bonding *bond,
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
- if (bond->dev->reg_state == NETREG_REGISTERED)
- bond_set_xfrm_features(bond->dev, newval->value);
-
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
+ if (bond->dev->reg_state == NETREG_REGISTERED) {
+ bool update = false;
+
+ update |= bond_set_xfrm_features(bond);
+ update |= bond_set_tls_features(bond);
+
+ if (update)
+ netdev_update_features(bond->dev);
+ }
+
return 0;
}
@@ -1219,6 +1240,10 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
newval->string, newval->value);
bond->params.xmit_policy = newval->value;
+ if (bond->dev->reg_state == NETREG_REGISTERED)
+ if (bond_set_tls_features(bond))
+ netdev_update_features(bond->dev);
+
return 0;
}
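
bond_option_mode_set() above now lets each feature helper report whether wanted_features may need refreshing, and calls netdev_update_features() at most once, rather than updating from inside bond_set_xfrm_features(). A loose standalone analogy of that batching idiom (bit values and names invented, and unlike the kernel helpers this one reports actual changes):

#include <stdbool.h>
#include <stdio.h>

static unsigned long wanted;

static bool set_flag(unsigned long bit, bool on)
{
	unsigned long old = wanted;

	if (on)
		wanted |= bit;
	else
		wanted &= ~bit;

	return wanted != old;	/* did anything change? */
}

int main(void)
{
	bool update = false;

	update |= set_flag(1UL << 0, true);	/* e.g. XFRM features */
	update |= set_flag(1UL << 1, false);	/* e.g. TLS features */

	if (update)
		printf("renegotiate features once\n");
	return 0;
}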
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 424970939fd4..1c28eade6bec 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select CRC32
help
This is a driver for the Kvaser PCI Express CAN FD family.
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 22164300122d..a2b4463d8480 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan.o
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += dev.o
-can-dev-y += rx-offload.o
-
-can-dev-$(CONFIG_CAN_LEDS) += led.o
-
+obj-y += dev/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 5284f0ab3b06..9ad9b39f480e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -484,7 +484,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += cf->len;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
+ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
/*
* we have to stop the queue and deliver all messages in case
@@ -856,7 +856,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
+ can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
dev->stats.tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
}
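
Throughout the CAN drivers in this series, can_put_echo_skb() gains a frame_len argument and can_get_echo_skb() a frame_len out-pointer; callers that do not need byte-accurate TX accounting pass 0 and NULL, as at91 does here. A toy model of just the widened calling convention (the kernel's real bookkeeping, presumably in the new dev/skb.c, is not reproduced):

#include <stdio.h>
#include <stddef.h>

#define ECHO_SLOTS 4

static unsigned int echo_len[ECHO_SLOTS];
static int echo_used[ECHO_SLOTS];

static int put_echo(unsigned int idx, unsigned int frame_len)
{
	if (idx >= ECHO_SLOTS || echo_used[idx])
		return -1;
	echo_used[idx] = 1;
	echo_len[idx] = frame_len;	/* stashed for TX-done accounting */
	return 0;
}

static unsigned int get_echo(unsigned int idx, unsigned int *frame_len_ptr)
{
	if (idx >= ECHO_SLOTS || !echo_used[idx])
		return 0;
	echo_used[idx] = 0;
	if (frame_len_ptr)		/* callers may pass NULL, as above */
		*frame_len_ptr = echo_len[idx];
	return echo_len[idx];
}

int main(void)
{
	unsigned int len;

	put_echo(0, 64);
	get_echo(0, &len);
	printf("echoed frame of %u bytes\n", len);

	put_echo(1, 8);
	printf("payload %u (caller ignored frame_len)\n", get_echo(1, NULL));
	return 0;
}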
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 63f48b016ecd..ef474bae47a1 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -476,7 +476,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
priv->dlc[idx] = frame->len;
- can_put_echo_skb(skb, dev, idx);
+ can_put_echo_skb(skb, dev, idx, 0);
/* Update the active bits */
atomic_add((1 << idx), &priv->tx_active);
@@ -733,7 +733,7 @@ static void c_can_do_tx(struct net_device *dev)
pend &= ~(1 << idx);
obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
c_can_inval_tx_object(dev, IF_RX, obj);
- can_get_echo_skb(dev, idx);
+ can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
pkts++;
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 8d9f332c35e0..f8a130f594e2 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -702,8 +702,8 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
stats->tx_bytes += cf->len;
stats->tx_packets++;
- can_put_echo_skb(priv->tx_skb, dev, 0);
- can_get_echo_skb(dev, 0);
+ can_put_echo_skb(priv->tx_skb, dev, 0, 0);
+ can_get_echo_skb(dev, 0, NULL);
priv->tx_skb = NULL;
netif_wake_queue(dev);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
deleted file mode 100644
index 3486704c8a95..000000000000
--- a/drivers/net/can/dev.c
+++ /dev/null
@@ -1,1338 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
- * Copyright (C) 2006 Andrey Volkov, Varma Electronics
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/can-ml.h>
-#include <linux/can/dev.h>
-#include <linux/can/skb.h>
-#include <linux/can/netlink.h>
-#include <linux/can/led.h>
-#include <linux/of.h>
-#include <net/rtnetlink.h>
-
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-
-/* CAN DLC to real data length conversion helpers */
-
-static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
- 8, 12, 16, 20, 24, 32, 48, 64};
-
-/* get data length from raw data length code (DLC) */
-u8 can_fd_dlc2len(u8 dlc)
-{
- return dlc2len[dlc & 0x0F];
-}
-EXPORT_SYMBOL_GPL(can_fd_dlc2len);
-
-static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
- 9, 9, 9, 9, /* 9 - 12 */
- 10, 10, 10, 10, /* 13 - 16 */
- 11, 11, 11, 11, /* 17 - 20 */
- 12, 12, 12, 12, /* 21 - 24 */
- 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
- 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
- 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_fd_len2dlc(u8 len)
-{
- if (unlikely(len > 64))
- return 0xF;
-
- return len2dlc[len];
-}
-EXPORT_SYMBOL_GPL(can_fd_len2dlc);
-
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
-{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
-
- return best_sample_point;
-}
-
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800000)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500000)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
- }
-
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
- }
-
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
-
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
- return 0;
-}
-#else /* !CONFIG_CAN_CALC_BITTIMING */
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- netdev_err(dev, "bit-timing calculation not available\n");
- return -EINVAL;
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
-/* Checks the validity of the specified bit-timing parameters prop_seg,
- * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
- * prescaler value brp. You can find more information in the header
- * file linux/can/netlink.h.
- */
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- int tseg1, alltseg;
- u64 brp64;
-
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
-
- brp64 = (u64)priv->clock.freq * (u64)bt->tq;
- if (btc->brp_inc > 1)
- do_div(brp64, btc->brp_inc);
- brp64 += 500000000UL - 1;
- do_div(brp64, 1000000000UL); /* the practicable BRP */
- if (btc->brp_inc > 1)
- brp64 *= btc->brp_inc;
- bt->brp = (u32)brp64;
-
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
- return -EINVAL;
-
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
-
- return 0;
-}
-
-/* Checks the validity of predefined bitrate settings */
-static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int i;
-
- for (i = 0; i < bitrate_const_cnt; i++) {
- if (bt->bitrate == bitrate_const[i])
- break;
- }
-
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
-
- return 0;
-}
-
-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- int err;
-
- /* Depending on the given can_bittiming parameter structure the CAN
- * timing parameters are calculated based on the provided bitrate OR
- * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
- * provided directly which are then checked and fixed up.
- */
- if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
-}
-
-static void can_update_state_error_stats(struct net_device *dev,
- enum can_state new_state)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (new_state <= priv->state)
- return;
-
- switch (new_state) {
- case CAN_STATE_ERROR_WARNING:
- priv->can_stats.error_warning++;
- break;
- case CAN_STATE_ERROR_PASSIVE:
- priv->can_stats.error_passive++;
- break;
- case CAN_STATE_BUS_OFF:
- priv->can_stats.bus_off++;
- break;
- default:
- break;
- }
-}
-
-static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_TX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_TX_PASSIVE;
- default:
- return 0;
- }
-}
-
-static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_RX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_RX_PASSIVE;
- default:
- return 0;
- }
-}
-
-static const char *can_get_state_str(const enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return "Error Active";
- case CAN_STATE_ERROR_WARNING:
- return "Error Warning";
- case CAN_STATE_ERROR_PASSIVE:
- return "Error Passive";
- case CAN_STATE_BUS_OFF:
- return "Bus Off";
- case CAN_STATE_STOPPED:
- return "Stopped";
- case CAN_STATE_SLEEPING:
- return "Sleeping";
- default:
- return "<unknown>";
- }
-
- return "<unknown>";
-}
-
-void can_change_state(struct net_device *dev, struct can_frame *cf,
- enum can_state tx_state, enum can_state rx_state)
-{
- struct can_priv *priv = netdev_priv(dev);
- enum can_state new_state = max(tx_state, rx_state);
-
- if (unlikely(new_state == priv->state)) {
- netdev_warn(dev, "%s: oops, state did not change", __func__);
- return;
- }
-
- netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
- can_get_state_str(priv->state), priv->state,
- can_get_state_str(new_state), new_state);
-
- can_update_state_error_stats(dev, new_state);
- priv->state = new_state;
-
- if (!cf)
- return;
-
- if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_BUSOFF;
- return;
- }
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= tx_state >= rx_state ?
- can_tx_state_to_frame(dev, tx_state) : 0;
- cf->data[1] |= tx_state <= rx_state ?
- can_rx_state_to_frame(dev, rx_state) : 0;
-}
-EXPORT_SYMBOL_GPL(can_change_state);
-
-/* Local echo of CAN messages
- *
- * CAN network devices *should* support a local echo functionality
- * (see Documentation/networking/can.rst). To test the handling of CAN
- * interfaces that do not support the local echo both driver types are
- * implemented. In the case that the driver does not support the echo
- * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
- * to perform the echo as a fallback solution.
- */
-static void can_flush_echo_skb(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- int i;
-
- for (i = 0; i < priv->echo_skb_max; i++) {
- if (priv->echo_skb[i]) {
- kfree_skb(priv->echo_skb[i]);
- priv->echo_skb[i] = NULL;
- stats->tx_dropped++;
- stats->tx_aborted_errors++;
- }
- }
-}
-
-/* Put the skb on the stack to be looped back locally later on
- *
- * The function is typically called in the start_xmit function
- * of the device driver. The driver must protect access to
- * priv->echo_skb, if necessary.
- */
-int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- /* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
- (skb->protocol != htons(ETH_P_CAN) &&
- skb->protocol != htons(ETH_P_CANFD))) {
- kfree_skb(skb);
- return 0;
- }
-
- if (!priv->echo_skb[idx]) {
- skb = can_create_echo_skb(skb);
- if (!skb)
- return -ENOMEM;
-
- /* make settings for echo to reduce code in irq context */
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->dev = dev;
-
- /* save this skb for tx interrupt echo handling */
- priv->echo_skb[idx] = skb;
- } else {
- /* locking problem with netif_stop_queue() ?? */
- netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
- kfree_skb(skb);
- return -EBUSY;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_put_echo_skb);
-
-struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (idx >= priv->echo_skb_max) {
- netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
- __func__, idx, priv->echo_skb_max);
- return NULL;
- }
-
- if (priv->echo_skb[idx]) {
- /* Using "struct canfd_frame::len" for the frame
- * length is supported on both CAN and CANFD frames.
- */
- struct sk_buff *skb = priv->echo_skb[idx];
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
-
- /* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
-
- priv->echo_skb[idx] = NULL;
-
- return skb;
- }
-
- return NULL;
-}
-
-/* Get the skb from the stack and loop it back locally
- *
- * The function is typically called when the TX done interrupt
- * is handled in the device driver. The driver must protect
- * access to priv->echo_skb, if necessary.
- */
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct sk_buff *skb;
- u8 len;
-
- skb = __can_get_echo_skb(dev, idx, &len);
- if (!skb)
- return 0;
-
- skb_get(skb);
- if (netif_rx(skb) == NET_RX_SUCCESS)
- dev_consume_skb_any(skb);
- else
- dev_kfree_skb_any(skb);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(can_get_echo_skb);
-
-/* Remove the skb from the stack and free it.
- *
- * The function is typically called when TX failed.
- */
-void can_free_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- if (priv->echo_skb[idx]) {
- dev_kfree_skb_any(priv->echo_skb[idx]);
- priv->echo_skb[idx] = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(can_free_echo_skb);
-
-/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
- struct can_frame *cf;
- int err;
-
- BUG_ON(netif_carrier_ok(dev));
-
- /* No synchronization needed because the device is bus-off and
- * no messages can come in or go out.
- */
- can_flush_echo_skb(dev);
-
- /* send restart message upstream */
- skb = alloc_can_err_skb(dev, &cf);
- if (!skb)
- goto restart;
-
- cf->can_id |= CAN_ERR_RESTARTED;
-
- netif_rx_ni(skb);
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
-restart:
- netdev_dbg(dev, "restarted\n");
- priv->can_stats.restarts++;
-
- /* Now restart the device */
- err = priv->do_set_mode(dev, CAN_MODE_START);
-
- netif_carrier_on(dev);
- if (err)
- netdev_err(dev, "Error %d during restart", err);
-}
-
-static void can_restart_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct can_priv *priv = container_of(dwork, struct can_priv,
- restart_work);
-
- can_restart(priv->dev);
-}
-
-int can_restart_now(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* A manual restart is only permitted if automatic restart is
- * disabled and the device is in the bus-off state
- */
- if (priv->restart_ms)
- return -EINVAL;
- if (priv->state != CAN_STATE_BUS_OFF)
- return -EBUSY;
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
-
- return 0;
-}
-
-/* CAN bus-off
- *
- * This function should be called when the device goes bus-off to
- * tell the netif layer that no more packets can be sent or received.
- * If enabled, a timer is started to trigger bus-off recovery.
- */
-void can_bus_off(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (priv->restart_ms)
- netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
- priv->restart_ms);
- else
- netdev_info(dev, "bus-off\n");
-
- netif_carrier_off(dev);
-
- if (priv->restart_ms)
- schedule_delayed_work(&priv->restart_work,
- msecs_to_jiffies(priv->restart_ms));
-}
-EXPORT_SYMBOL_GPL(can_bus_off);
-
-static void can_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_CAN;
- dev->mtu = CAN_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct can_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cf = skb_put_zero(skb, sizeof(struct can_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_skb);
-
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct canfd_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_canfd_skb);
-
-struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = alloc_can_skb(dev, cf);
- if (unlikely(!skb))
- return NULL;
-
- (*cf)->can_id = CAN_ERR_FLAG;
- (*cf)->len = CAN_ERR_DLC;
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_err_skb);
-
-/* Allocate and setup space for the CAN network device */
-struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
- unsigned int txqs, unsigned int rxqs)
-{
- struct net_device *dev;
- struct can_priv *priv;
- int size;
-
- /* We put the driver's priv, the CAN mid layer priv and the
- * echo skb into the netdevice's priv. The memory layout for
- * the netdev_priv is like this:
- *
- * +-------------------------+
- * | driver's priv |
- * +-------------------------+
- * | struct can_ml_priv |
- * +-------------------------+
- * | array of struct sk_buff |
- * +-------------------------+
- */
-
- size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
-
- if (echo_skb_max)
- size = ALIGN(size, sizeof(struct sk_buff *)) +
- echo_skb_max * sizeof(struct sk_buff *);
-
- dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
- txqs, rxqs);
- if (!dev)
- return NULL;
-
- priv = netdev_priv(dev);
- priv->dev = dev;
-
- dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
-
- if (echo_skb_max) {
- priv->echo_skb_max = echo_skb_max;
- priv->echo_skb = (void *)priv +
- (size - echo_skb_max * sizeof(struct sk_buff *));
- }
-
- priv->state = CAN_STATE_STOPPED;
-
- INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(alloc_candev_mqs);
-
-/* Free space of the CAN network device */
-void free_candev(struct net_device *dev)
-{
- free_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(free_candev);
-
-/* changing MTU and control mode for CAN/CANFD devices */
-int can_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Do not allow changing the MTU while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* allow change of MTU according to the CANFD ability of the device */
- switch (new_mtu) {
- case CAN_MTU:
- /* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
- return -EINVAL;
-
- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- break;
-
- case CANFD_MTU:
- /* check for potential CANFD ability */
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
- return -EINVAL;
-
- priv->ctrlmode |= CAN_CTRLMODE_FD;
- break;
-
- default:
- return -EINVAL;
- }
-
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_change_mtu);
-
-/* Common open function when the device gets opened.
- *
- * This function should be called in the open function of the device
- * driver.
- */
-int open_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (!priv->bittiming.bitrate) {
- netdev_err(dev, "bit-timing not yet defined\n");
- return -EINVAL;
- }
-
- /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
- if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
- netdev_err(dev, "incorrect/missing data bit-timing\n");
- return -EINVAL;
- }
-
- /* Switch carrier on if device was stopped while in bus-off state */
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(open_candev);
-
-#ifdef CONFIG_OF
-/* Common function that can be used to understand the limitations of
- * a transceiver when it provides no means to determine these limitations
- * at runtime.
- */
-void of_can_transceiver(struct net_device *dev)
-{
- struct device_node *dn;
- struct can_priv *priv = netdev_priv(dev);
- struct device_node *np = dev->dev.parent->of_node;
- int ret;
-
- dn = of_get_child_by_name(np, "can-transceiver");
- if (!dn)
- return;
-
- ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
- of_node_put(dn);
- if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
- netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
-}
-EXPORT_SYMBOL_GPL(of_can_transceiver);
-#endif
-
-/* Common close function for cleanup before the device gets closed.
- *
- * This function should be called in the close function of the device
- * driver.
- */
-void close_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_flush_echo_skb(dev);
-}
-EXPORT_SYMBOL_GPL(close_candev);
-
-/* CAN netlink interface */
-static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
- [IFLA_CAN_STATE] = { .type = NLA_U32 },
- [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
- [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
- [IFLA_CAN_RESTART] = { .type = NLA_U32 },
- [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
- [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
- [IFLA_CAN_DATA_BITTIMING]
- = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
-};
-
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- bool is_can_fd = false;
-
- /* Make sure that valid CAN FD configurations always consist of
- * - nominal/arbitration bittiming
- * - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
- */
-
- if (!data)
- return 0;
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
- }
-
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- struct can_priv *priv = netdev_priv(dev);
- int err;
-
- /* We need synchronization with dev->stop() */
- ASSERT_RTNL();
-
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->bittiming_const && !priv->do_set_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt,
- priv->bittiming_const,
- priv->bitrate_const,
- priv->bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->bittiming, &bt, sizeof(bt));
-
- if (priv->do_set_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = priv->ctrlmode_static;
- maskedflags = cm->flags & cm->mask;
-
- /* check whether provided bits are allowed to be passed */
- if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
-
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
-
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
- return -EOPNOTSUPP;
-
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
-
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
- else
- dev->mtu = CAN_MTU;
- }
-
- if (data[IFLA_CAN_RESTART_MS]) {
- /* Do not allow changing restart delay while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
- }
-
- if (data[IFLA_CAN_RESTART]) {
- /* Do not allow a restart while not running */
- if (!(dev->flags & IFF_UP))
- return -EINVAL;
- err = can_restart_now(dev);
- if (err)
- return err;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_TERMINATION]) {
- const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
- const unsigned int num_term = priv->termination_const_cnt;
- unsigned int i;
-
- if (!priv->do_set_termination)
- return -EOPNOTSUPP;
-
- /* check whether given value is supported by the interface */
- for (i = 0; i < num_term; i++) {
- if (termval == priv->termination_const[i])
- break;
- }
- if (i >= num_term)
- return -EINVAL;
-
- /* Finally, set the termination value */
- err = priv->do_set_termination(dev, termval);
- if (err)
- return err;
-
- priv->termination = termval;
- }
-
- return 0;
-}
-
-static size_t can_get_size(const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- size_t size = 0;
-
- if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- if (priv->termination_const) {
- size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
- size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
- priv->termination_const_cnt);
- }
- if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
- size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
-
- return size;
-}
-
-static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec;
- enum can_state state = priv->state;
-
- if (priv->do_get_state)
- priv->do_get_state(dev, &state);
-
- if ((priv->bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
-
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
-
- nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
- nla_put_u32(skb, IFLA_CAN_STATE, state) ||
- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
-
- (priv->do_get_berr_counter &&
- !priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
-
- (priv->data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
-
- (priv->data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
-
- (priv->termination_const &&
- (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
- nla_put(skb, IFLA_CAN_TERMINATION_CONST,
- sizeof(*priv->termination_const) *
- priv->termination_const_cnt,
- priv->termination_const))) ||
-
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
-
- (priv->data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
-
- (nla_put(skb, IFLA_CAN_BITRATE_MAX,
- sizeof(priv->bitrate_max),
- &priv->bitrate_max))
- )
-
- return -EMSGSIZE;
-
- return 0;
-}
-
-static size_t can_get_xstats_size(const struct net_device *dev)
-{
- return sizeof(struct can_device_stats);
-}
-
-static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (nla_put(skb, IFLA_INFO_XSTATS,
- sizeof(priv->can_stats), &priv->can_stats))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
-
-static void can_dellink(struct net_device *dev, struct list_head *head)
-{
-}
-
-static struct rtnl_link_ops can_link_ops __read_mostly = {
- .kind = "can",
- .maxtype = IFLA_CAN_MAX,
- .policy = can_policy,
- .setup = can_setup,
- .validate = can_validate,
- .newlink = can_newlink,
- .changelink = can_changelink,
- .dellink = can_dellink,
- .get_size = can_get_size,
- .fill_info = can_fill_info,
- .get_xstats_size = can_get_xstats_size,
- .fill_xstats = can_fill_xstats,
-};
-
-/* Register the CAN network device */
-int register_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Ensure termination_const, termination_const_cnt and
- * do_set_termination consistency. All must be either set or
- * unset.
- */
- if ((!priv->termination_const != !priv->termination_const_cnt) ||
- (!priv->termination_const != !priv->do_set_termination))
- return -EINVAL;
-
- if (!priv->bitrate_const != !priv->bitrate_const_cnt)
- return -EINVAL;
-
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
- return -EINVAL;
-
- dev->rtnl_link_ops = &can_link_ops;
- netif_carrier_off(dev);
-
- return register_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(register_candev);
-
-/* Unregister the CAN network device */
-void unregister_candev(struct net_device *dev)
-{
- unregister_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(unregister_candev);
-
-/* Test if a network device is a candev based device
- * and return the can_priv* if so.
- */
-struct can_priv *safe_candev_priv(struct net_device *dev)
-{
- if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
- return NULL;
-
- return netdev_priv(dev);
-}
-EXPORT_SYMBOL_GPL(safe_candev_priv);
-
-static __init int can_dev_init(void)
-{
- int err;
-
- can_led_notifier_init();
-
- err = rtnl_link_register(&can_link_ops);
- if (!err)
- pr_info(MOD_DESC "\n");
-
- return err;
-}
-module_init(can_dev_init);
-
-static __exit void can_dev_exit(void)
-{
- rtnl_link_unregister(&can_link_ops);
-
- can_led_notifier_exit();
-}
-module_exit(can_dev_exit);
-
-MODULE_ALIAS_RTNL_LINK("can");
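
dev.c is not dropped but split: per the new Makefile below, its bit-timing, length, netlink and skb helpers move into dedicated files under dev/. One piece worth restating is the CAN FD length coding implemented by the dlc2len/len2dlc tables above: payloads over 8 bytes round up to the next legal frame size. A standalone equivalent (the search loop replaces the kernel's precomputed len2dlc table but yields the same mapping):

#include <stdio.h>

static const unsigned char dlc2len[] = { 0, 1, 2, 3, 4, 5, 6, 7,
					 8, 12, 16, 20, 24, 32, 48, 64 };

/* smallest DLC whose payload size covers 'len' (len <= 64) */
static unsigned char len2dlc(unsigned char len)
{
	for (unsigned char dlc = 0; dlc < 16; dlc++)
		if (dlc2len[dlc] >= len)
			return dlc;
	return 0xF;
}

int main(void)
{
	/* e.g. a 13-byte payload must be sent in a 16-byte frame */
	printf("len 13 -> dlc %u -> len %u\n",
	       len2dlc(13), dlc2len[len2dlc(13)]);
	return 0;
}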
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
new file mode 100644
index 000000000000..3e2e207861fc
--- /dev/null
+++ b/drivers/net/can/dev/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+can-dev-y += bittiming.o
+can-dev-y += dev.o
+can-dev-y += length.o
+can-dev-y += netlink.o
+can-dev-y += rx-offload.o
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_LEDS) += led.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
new file mode 100644
index 000000000000..f7fe226bb395
--- /dev/null
+++ b/drivers/net/can/dev/bittiming.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ unsigned int sample_point_nominal, unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800000)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500000)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error > best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ netdev_err(dev,
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EDOM;
+ }
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max) {
+ bt->sjw = 1;
+ } else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+
+ return 0;
+}
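+
+/* Worked example (assumed values, sketch only): for an 8 MHz clock,
+ * 500 kbit/s and the CiA default sample point of 87.5%, the search
+ * above settles on brp = 1, i.e. 16 time quanta per bit:
+ *
+ *   tsegall = 8000000 / (1 * 500000)  = 16
+ *   tseg2   = 16 - (875 * 16) / 1000  = 2
+ *   tseg1   = 16 - CAN_SYNC_SEG - 2   = 13
+ *   sample  = 1000 * (16 - 2) / 16    = 875, i.e. exactly 87.5%
+ */
+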
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+/* Checks the validity of the specified bit-timing parameters prop_seg,
+ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+ * prescaler value brp. You can find more information in the header
+ * file linux/can/netlink.h.
+ */
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int tseg1, alltseg;
+ u64 brp64;
+
+ tseg1 = bt->prop_seg + bt->phase_seg1;
+ if (!bt->sjw)
+ bt->sjw = 1;
+ if (bt->sjw > btc->sjw_max ||
+ tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
+ bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
+ return -ERANGE;
+
+ brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+ if (btc->brp_inc > 1)
+ do_div(brp64, btc->brp_inc);
+ brp64 += 500000000UL - 1;
+ do_div(brp64, 1000000000UL); /* the practicable BRP */
+ if (btc->brp_inc > 1)
+ brp64 *= btc->brp_inc;
+ bt->brp = (u32)brp64;
+
+ if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ return -EINVAL;
+
+ alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
+ bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
+ bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+
+ return 0;
+}
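+
+/* Worked example for the BRP rounding above (assumed values, sketch
+ * only): for freq = 24 MHz, tq = 125 ns and brp_inc = 1:
+ *
+ *   brp64 = 24000000 * 125                               = 3000000000
+ *   brp   = (3000000000 + 500000000 - 1) / 1000000000    = 3
+ *
+ * i.e. one time quantum spans three clock periods.
+ */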
+
+/* Checks the validity of predefined bitrate settings */
+static int
+can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < bitrate_const_cnt; i++) {
+ if (bt->bitrate == bitrate_const[i])
+ break;
+ }
+
+ if (i >= bitrate_const_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
+int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ int err;
+
+ /* Depending on the given can_bittiming parameter structure the CAN
+ * timing parameters are either calculated based on the provided
+ * bitrate OR the CAN timing parameters (tq, prop_seg, etc.) are
+ * provided directly and are then checked and fixed up.
+ */
+ if (!bt->tq && bt->bitrate && btc)
+ err = can_calc_bittiming(dev, bt, btc);
+ else if (bt->tq && !bt->bitrate && btc)
+ err = can_fixup_bittiming(dev, bt, btc);
+ else if (!bt->tq && bt->bitrate && bitrate_const)
+ err = can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt);
+ else
+ err = -EINVAL;
+
+ return err;
+}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
new file mode 100644
index 000000000000..d9281ae853f8
--- /dev/null
+++ b/drivers/net/can/dev/dev.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/can-ml.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+#include <linux/can/led.h>
+#include <linux/of.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+
+static void can_update_state_error_stats(struct net_device *dev,
+ enum can_state new_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (new_state <= priv->state)
+ return;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ priv->can_stats.error_warning++;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can_stats.error_passive++;
+ break;
+ case CAN_STATE_BUS_OFF:
+ priv->can_stats.bus_off++;
+ break;
+ default:
+ break;
+ }
+}
+
+static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_TX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_TX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_RX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_RX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+const char *can_get_state_str(const enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return "Error Active";
+ case CAN_STATE_ERROR_WARNING:
+ return "Error Warning";
+ case CAN_STATE_ERROR_PASSIVE:
+ return "Error Passive";
+ case CAN_STATE_BUS_OFF:
+ return "Bus Off";
+ case CAN_STATE_STOPPED:
+ return "Stopped";
+ case CAN_STATE_SLEEPING:
+ return "Sleeping";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_state_str);
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ enum can_state new_state = max(tx_state, rx_state);
+
+ if (unlikely(new_state == priv->state)) {
+ netdev_warn(dev, "%s: oops, state did not change\n", __func__);
+ return;
+ }
+
+ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+ can_get_state_str(priv->state), priv->state,
+ can_get_state_str(new_state), new_state);
+
+ can_update_state_error_stats(dev, new_state);
+ priv->state = new_state;
+
+ if (!cf)
+ return;
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= tx_state >= rx_state ?
+ can_tx_state_to_frame(dev, tx_state) : 0;
+ cf->data[1] |= tx_state <= rx_state ?
+ can_rx_state_to_frame(dev, rx_state) : 0;
+}
+EXPORT_SYMBOL_GPL(can_change_state);
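+
+/* Hedged usage sketch ("my_handle_state_change" and the fixed states
+ * are hypothetical): a driver derives tx_state/rx_state from its error
+ * counters and lets can_change_state() fill in the error frame.
+ */
+static void my_handle_state_change(struct net_device *dev)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb = alloc_can_err_skb(dev, &cf);
+
+ /* a NULL frame is allowed, the statistics are updated anyway */
+ can_change_state(dev, skb ? cf : NULL,
+ CAN_STATE_ERROR_WARNING, CAN_STATE_ERROR_ACTIVE);
+ if (skb)
+ netif_rx(skb);
+}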
+
+/* CAN device restart for bus-off recovery */
+static void can_restart(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int err;
+
+ BUG_ON(netif_carrier_ok(dev));
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+ */
+ can_flush_echo_skb(dev);
+
+ /* send restart message upstream */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb)
+ goto restart;
+
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+
+ netif_rx_ni(skb);
+
+restart:
+ netdev_dbg(dev, "restarted\n");
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+ err = priv->do_set_mode(dev, CAN_MODE_START);
+
+ netif_carrier_on(dev);
+ if (err)
+ netdev_err(dev, "Error %d during restart\n", err);
+}
+
+static void can_restart_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct can_priv *priv = container_of(dwork, struct can_priv,
+ restart_work);
+
+ can_restart(priv->dev);
+}
+
+int can_restart_now(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* A manual restart is only permitted if automatic restart is
+ * disabled and the device is in the bus-off state
+ */
+ if (priv->restart_ms)
+ return -EINVAL;
+ if (priv->state != CAN_STATE_BUS_OFF)
+ return -EBUSY;
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_restart(dev);
+
+ return 0;
+}
+
+/* CAN bus-off
+ *
+ * This function should be called when the device goes bus-off to
+ * tell the netif layer that no more packets can be sent or received.
+ * If enabled, a timer is started to trigger bus-off recovery.
+ */
+void can_bus_off(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->restart_ms)
+ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+ priv->restart_ms);
+ else
+ netdev_info(dev, "bus-off\n");
+
+ netif_carrier_off(dev);
+
+ if (priv->restart_ms)
+ schedule_delayed_work(&priv->restart_work,
+ msecs_to_jiffies(priv->restart_ms));
+}
+EXPORT_SYMBOL_GPL(can_bus_off);
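+
+/* Usage note (sketch): the automatic restart above is armed from user
+ * space via iproute2, e.g. "ip link set can0 type can restart-ms 100";
+ * a manual restart maps to "ip link set can0 type can restart".
+ */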
+
+void can_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = CAN_MTU;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_HW_CSUM;
+}
+
+/* Allocate and setup space for the CAN network device */
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
+{
+ struct net_device *dev;
+ struct can_priv *priv;
+ int size;
+
+ /* We put the driver's priv, the CAN mid layer priv and the
+ * echo skb into the netdevice's priv. The memory layout for
+ * the netdev_priv is like this:
+ *
+ * +-------------------------+
+ * | driver's priv |
+ * +-------------------------+
+ * | struct can_ml_priv |
+ * +-------------------------+
+ * | array of struct sk_buff |
+ * +-------------------------+
+ */
+
+ size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
+
+ if (echo_skb_max)
+ size = ALIGN(size, sizeof(struct sk_buff *)) +
+ echo_skb_max * sizeof(struct sk_buff *);
+
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+
+ dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+
+ if (echo_skb_max) {
+ priv->echo_skb_max = echo_skb_max;
+ priv->echo_skb = (void *)priv +
+ (size - echo_skb_max * sizeof(struct sk_buff *));
+ }
+
+ priv->state = CAN_STATE_STOPPED;
+
+ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
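+
+/* Hedged usage sketch ("struct my_priv" and "my_alloc" are
+ * hypothetical): drivers embed struct can_priv as the first member of
+ * their private structure and typically allocate via the alloc_candev()
+ * wrapper, here with four echo slots:
+ */
+struct my_priv {
+ struct can_priv can; /* must be the first member */
+ void __iomem *base;
+};
+
+static struct net_device *my_alloc(void)
+{
+ return alloc_candev(sizeof(struct my_priv), 4);
+}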
+
+/* Free space of the CAN network device */
+void free_candev(struct net_device *dev)
+{
+ free_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(free_candev);
+
+/* changing MTU and control mode for CAN/CANFD devices */
+int can_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* allow change of MTU according to the CANFD ability of the device */
+ switch (new_mtu) {
+ case CAN_MTU:
+ /* 'CANFD-only' controllers can not switch to CAN_MTU */
+ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+ return -EINVAL;
+
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ break;
+
+ case CANFD_MTU:
+ /* check for potential CANFD ability */
+ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ return -EINVAL;
+
+ priv->ctrlmode |= CAN_CTRLMODE_FD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_change_mtu);
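+
+/* Usage note (sketch): from user space the switch boils down to
+ * "ip link set can0 mtu 72" (CANFD_MTU) to enable CAN FD mode and
+ * "ip link set can0 mtu 16" (CAN_MTU) to fall back to Classical CAN.
+ */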
+
+/* Common open function when the device gets opened.
+ *
+ * This function should be called in the open function of the device
+ * driver.
+ */
+int open_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (!priv->bittiming.bitrate) {
+ netdev_err(dev, "bit-timing not yet defined\n");
+ return -EINVAL;
+ }
+
+ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+ (!priv->data_bittiming.bitrate ||
+ priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ netdev_err(dev, "incorrect/missing data bit-timing\n");
+ return -EINVAL;
+ }
+
+ /* Switch carrier on if device was stopped while in bus-off state */
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(open_candev);
+
+#ifdef CONFIG_OF
+/* Common function to apply the bitrate limitation of a transceiver
+ * that provides no means to determine this limitation at runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+ struct device_node *dn;
+ struct can_priv *priv = netdev_priv(dev);
+ struct device_node *np = dev->dev.parent->of_node;
+ int ret;
+
+ dn = of_get_child_by_name(np, "can-transceiver");
+ if (!dn)
+ return;
+
+ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ of_node_put(dn);
+ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
+#endif
+
+/* Common close function for cleanup before the device gets closed.
+ *
+ * This function should be called in the close function of the device
+ * driver.
+ */
+void close_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_flush_echo_skb(dev);
+}
+EXPORT_SYMBOL_GPL(close_candev);
+
+/* Register the CAN network device */
+int register_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Ensure termination_const, termination_const_cnt and
+ * do_set_termination consistency. All must be either set or
+ * unset.
+ */
+ if ((!priv->termination_const != !priv->termination_const_cnt) ||
+ (!priv->termination_const != !priv->do_set_termination))
+ return -EINVAL;
+
+ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ return -EINVAL;
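+
+ /* Note: "!a != !b" above is a boolean XOR: it evaluates true exactly
+ * when one of the pair is set and the other is not, e.g. a non-NULL
+ * bitrate_const with bitrate_const_cnt == 0 is rejected with -EINVAL.
+ */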
+
+ dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
+ return register_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(register_candev);
+
+/* Unregister the CAN network device */
+void unregister_candev(struct net_device *dev)
+{
+ unregister_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_candev);
+
+/* Test if a network device is a candev based device
+ * and return the can_priv* if so.
+ */
+struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
+ return NULL;
+
+ return netdev_priv(dev);
+}
+EXPORT_SYMBOL_GPL(safe_candev_priv);
+
+static __init int can_dev_init(void)
+{
+ int err;
+
+ can_led_notifier_init();
+
+ err = can_netlink_register();
+ if (!err)
+ pr_info(MOD_DESC "\n");
+
+ return err;
+}
+module_init(can_dev_init);
+
+static __exit void can_dev_exit(void)
+{
+ can_netlink_unregister();
+
+ can_led_notifier_exit();
+}
+module_exit(can_dev_exit);
+
+MODULE_ALIAS_RTNL_LINK("can");
diff --git a/drivers/net/can/dev/length.c b/drivers/net/can/dev/length.c
new file mode 100644
index 000000000000..b48140b1102e
--- /dev/null
+++ b/drivers/net/can/dev/length.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2012, 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ */
+
+#include <linux/can/dev.h>
+
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64
+};
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc)
+{
+ return dlc2len[dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_fd_dlc2len);
+
+static const u8 len2dlc[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+ 15, 15, 15, 15, 15, 15, 15, 15 /* 57 - 64 */
+};
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len)
+{
+ /* check for length mapping table size at build time */
+ BUILD_BUG_ON(ARRAY_SIZE(len2dlc) != CANFD_MAX_DLEN + 1);
+
+ if (unlikely(len > CANFD_MAX_DLEN))
+ return CANFD_MAX_DLC;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_fd_len2dlc);
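+
+/* Worked example (sketch): the two helpers above are deliberately not
+ * inverse for payload lengths without an exact DLC, the length is
+ * rounded up to the next valid CAN FD frame size:
+ *
+ *   can_fd_len2dlc(17) -> 11
+ *   can_fd_dlc2len(11) -> 20
+ *
+ * i.e. a 17 byte payload travels in a 20 byte frame.
+ */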
+
+/**
+ * can_skb_get_frame_len() - Calculate the CAN Frame length in bytes
+ * of a given skb.
+ * @skb: socket buffer of a CAN message.
+ *
+ * Do a rough calculation: bit stuffing is ignored and length in bits
+ * is rounded up to a length in bytes.
+ *
+ * Rationale: this function is to be used for the BQL functions
+ * (netdev_sent_queue() and netdev_completed_queue()) which expect a
+ * value in bytes. Just using skb->len is insufficient because it will
+ * return the constant value of CAN(FD)_MTU. Doing the bit stuffing
+ * calculation would be too expensive in terms of computing resources
+ * for no noticeable gain.
+ *
+ * Remarks: The payload of CAN FD frames with the BRS flag set is sent
+ * at a different bitrate. Currently, the can-utils canbusload tool
+ * does not yet support CAN FD, so no benchmark could be run to
+ * measure the impact. There might be room for improvement here.
+ *
+ * Return: length in bytes.
+ */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb)
+{
+ const struct canfd_frame *cf = (const struct canfd_frame *)skb->data;
+ u8 len;
+
+ if (can_is_canfd_skb(skb))
+ len = canfd_sanitize_len(cf->len);
+ else if (cf->can_id & CAN_RTR_FLAG)
+ len = 0;
+ else
+ len = cf->len;
+
+ if (can_is_canfd_skb(skb)) {
+ if (cf->can_id & CAN_EFF_FLAG)
+ len += CANFD_FRAME_OVERHEAD_EFF;
+ else
+ len += CANFD_FRAME_OVERHEAD_SFF;
+ } else {
+ if (cf->can_id & CAN_EFF_FLAG)
+ len += CAN_FRAME_OVERHEAD_EFF;
+ else
+ len += CAN_FRAME_OVERHEAD_SFF;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_skb_get_frame_len);
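+
+/* Hedged BQL sketch ("my_start_xmit" and the single echo slot are
+ * hypothetical): pair netdev_sent_queue() in the xmit path with
+ * netdev_completed_queue() in the TX-done path.
+ */
+static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int frame_len = can_skb_get_frame_len(skb);
+
+ if (can_put_echo_skb(skb, dev, 0, frame_len))
+ return NETDEV_TX_OK;
+ netdev_sent_queue(dev, frame_len);
+ /* ... hand the frame to the hardware ... */
+ return NETDEV_TX_OK;
+}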
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
new file mode 100644
index 000000000000..867f6be31230
--- /dev/null
+++ b/drivers/net/can/dev/netlink.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+#include <net/rtnetlink.h>
+
+static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ [IFLA_CAN_STATE] = { .type = NLA_U32 },
+ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING]
+ = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+};
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ bool is_can_fd = false;
+
+ /* Make sure that valid CAN FD configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_FD set
+ */
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ }
+
+ if (is_can_fd) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic;
+ u32 maskedflags;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = priv->ctrlmode_static;
+ maskedflags = cm->flags & cm->mask;
+
+ /* check whether provided bits are allowed to be passed */
+ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+ return -EOPNOTSUPP;
+
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ /* make sure static options are provided by configuration */
+ if ((maskedflags & ctrlstatic) != ctrlstatic)
+ return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+ else
+ dev->mtu = CAN_MTU;
+ }
+
+ if (data[IFLA_CAN_RESTART_MS]) {
+ /* Do not allow changing restart delay while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ }
+
+ if (data[IFLA_CAN_RESTART]) {
+ /* Do not allow a restart while not running */
+ if (!(dev->flags & IFF_UP))
+ return -EINVAL;
+ err = can_restart_now(dev);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming dbt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * data_bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_data_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+ sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt,
+ priv->data_bittiming_const,
+ priv->data_bitrate_const,
+ priv->data_bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+
+ if (priv->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_TERMINATION]) {
+ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+ const unsigned int num_term = priv->termination_const_cnt;
+ unsigned int i;
+
+ if (!priv->do_set_termination)
+ return -EOPNOTSUPP;
+
+ /* check whether given value is supported by the interface */
+ for (i = 0; i < num_term; i++) {
+ if (termval == priv->termination_const[i])
+ break;
+ }
+ if (i >= num_term)
+ return -EINVAL;
+
+ /* Finally, set the termination value */
+ err = priv->do_set_termination(dev, termval);
+ if (err)
+ return err;
+
+ priv->termination = termval;
+ }
+
+ return 0;
+}
+
+static size_t can_get_size(const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ size_t size = 0;
+
+ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ if (priv->termination_const) {
+ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+ priv->termination_const_cnt);
+ }
+ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt);
+ if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt);
+ size += nla_total_size(sizeof(priv->bitrate_max)); /* IFLA_CAN_BITRATE_MAX */
+
+ return size;
+}
+
+static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ struct can_berr_counter bec = { };
+ enum can_state state = priv->state;
+
+ if (priv->do_get_state)
+ priv->do_get_state(dev, &state);
+
+ if ((priv->bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_BITTIMING,
+ sizeof(priv->bittiming), &priv->bittiming)) ||
+
+ (priv->bittiming_const &&
+ nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+ sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+
+ nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
+ (priv->do_get_berr_counter &&
+ !priv->do_get_berr_counter(dev, &bec) &&
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+ (priv->data_bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+ sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+
+ (priv->data_bittiming_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ sizeof(*priv->data_bittiming_const),
+ priv->data_bittiming_const)) ||
+
+ (priv->termination_const &&
+ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+ sizeof(*priv->termination_const) *
+ priv->termination_const_cnt,
+ priv->termination_const))) ||
+
+ (priv->bitrate_const &&
+ nla_put(skb, IFLA_CAN_BITRATE_CONST,
+ sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt,
+ priv->bitrate_const)) ||
+
+ (priv->data_bitrate_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt,
+ priv->data_bitrate_const)) ||
+
+ nla_put(skb, IFLA_CAN_BITRATE_MAX,
+ sizeof(priv->bitrate_max), &priv->bitrate_max))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t can_get_xstats_size(const struct net_device *dev)
+{
+ return sizeof(struct can_device_stats);
+}
+
+static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (nla_put(skb, IFLA_INFO_XSTATS,
+ sizeof(priv->can_stats), &priv->can_stats))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int can_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+}
+
+struct rtnl_link_ops can_link_ops __read_mostly = {
+ .kind = "can",
+ .maxtype = IFLA_CAN_MAX,
+ .policy = can_policy,
+ .setup = can_setup,
+ .validate = can_validate,
+ .newlink = can_newlink,
+ .changelink = can_changelink,
+ .dellink = can_dellink,
+ .get_size = can_get_size,
+ .fill_info = can_fill_info,
+ .get_xstats_size = can_get_xstats_size,
+ .fill_xstats = can_fill_xstats,
+};
+
+int can_netlink_register(void)
+{
+ return rtnl_link_register(&can_link_ops);
+}
+
+void can_netlink_unregister(void)
+{
+ rtnl_link_unregister(&can_link_ops);
+}
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 3c1912c0430b..ab2c1543786c 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -263,7 +263,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp)
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr)
{
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
@@ -271,7 +272,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
u8 len;
int err;
- skb = __can_get_echo_skb(dev, idx, &len);
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
new file mode 100644
index 000000000000..6a64fe410987
--- /dev/null
+++ b/drivers/net/can/dev/skb.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+
+/* Local echo of CAN messages
+ *
+ * CAN network devices *should* support a local echo functionality
+ * (see Documentation/networking/can.rst). To test the handling of CAN
+ * interfaces that do not support the local echo both driver types are
+ * implemented. In the case that the driver does not support the echo
+ * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
+ * to perform the echo as a fallback solution.
+ */
+void can_flush_echo_skb(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ int i;
+
+ for (i = 0; i < priv->echo_skb_max; i++) {
+ if (priv->echo_skb[i]) {
+ kfree_skb(priv->echo_skb[i]);
+ priv->echo_skb[i] = NULL;
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+ }
+ }
+}
+
+/* Put the skb on the stack, to be looped back locally later on
+ *
+ * The function is typically called in the start_xmit function
+ * of the device driver. The driver must protect access to
+ * priv->echo_skb, if necessary.
+ */
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+ (skb->protocol != htons(ETH_P_CAN) &&
+ skb->protocol != htons(ETH_P_CANFD))) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (!priv->echo_skb[idx]) {
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return -ENOMEM;
+
+ /* make settings for echo to reduce code in irq context */
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = dev;
+
+ /* save frame_len to reuse it when transmission is completed */
+ can_skb_prv(skb)->frame_len = frame_len;
+
+ skb_tx_timestamp(skb);
+
+ /* save this skb for tx interrupt echo handling */
+ priv->echo_skb[idx] = skb;
+ } else {
+ /* locking problem with netif_stop_queue() ?? */
+ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
+ kfree_skb(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_put_echo_skb);
+
+struct sk_buff *
+__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
+ unsigned int *frame_len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return NULL;
+ }
+
+ if (priv->echo_skb[idx]) {
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
+ if (frame_len_ptr)
+ *frame_len_ptr = can_skb_priv->frame_len;
+
+ priv->echo_skb[idx] = NULL;
+
+ return skb;
+ }
+
+ return NULL;
+}
+
+/* Get the skb from the stack and loop it back locally
+ *
+ * The function is typically called when the TX done interrupt
+ * is handled in the device driver. The driver must protect
+ * access to priv->echo_skb, if necessary.
+ */
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr)
+{
+ struct sk_buff *skb;
+ u8 len;
+
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
+ if (!skb)
+ return 0;
+
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_get_echo_skb);
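+
+/* TX-complete counterpart to the BQL sketch in length.c ("my_tx_done"
+ * is hypothetical): fetch the frame length saved at can_put_echo_skb()
+ * time and complete the BQL accounting.
+ */
+static void my_tx_done(struct net_device *dev)
+{
+ unsigned int frame_len;
+ unsigned int len = can_get_echo_skb(dev, 0, &frame_len);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+ netdev_completed_queue(dev, 1, frame_len);
+}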
+
+/* Remove the skb from the stack and free it.
+ *
+ * The function is typically called when TX failed.
+ */
+void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ if (priv->echo_skb[idx]) {
+ dev_kfree_skb_any(priv->echo_skb[idx]);
+ priv->echo_skb[idx] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(can_free_echo_skb);
+
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct can_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
+
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, cf);
+ if (unlikely(!skb))
+ return NULL;
+
+ (*cf)->can_id = CAN_ERR_FLAG;
+ (*cf)->len = CAN_ERR_DLC;
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 038fe1036df2..971ada36e37f 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -9,6 +9,7 @@
//
// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/bitfield.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -17,6 +18,7 @@
#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -242,6 +244,8 @@
#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
/* support memory detection and correction */
#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
+/* Setup stop mode with SCU firmware to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -347,6 +351,7 @@ struct flexcan_priv {
u8 mb_count;
u8 mb_size;
u8 clk_src; /* clock source of CAN Protocol Engine */
+ u8 scu_idx;
u64 rx_mask;
u64 tx_mask;
@@ -358,6 +363,9 @@ struct flexcan_priv {
struct regulator *reg_xceiver;
struct flexcan_stop_mode stm;
+ /* IPC handle for setting up stop mode via System Controller firmware (SCFW) */
+ struct imx_sc_ipc *sc_ipc_handle;
+
/* Read and Write APIs */
u32 (*read)(void __iomem *addr);
void (*write)(u32 val, void __iomem *addr);
@@ -387,7 +395,7 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPORT_FD,
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
@@ -546,18 +554,42 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
priv->write(reg_mcr, &regs->mcr);
}
+static int flexcan_stop_mode_enable_scfw(struct flexcan_priv *priv, bool enabled)
+{
+ u8 idx = priv->scu_idx;
+ u32 rsrc_id, val;
+
+ rsrc_id = IMX_SC_R_CAN(idx);
+
+ if (enabled)
+ val = 1;
+ else
+ val = 0;
+
+ /* stop mode request via scu firmware */
+ return imx_sc_misc_set_control(priv->sc_ipc_handle, rsrc_id,
+ IMX_SC_C_IPG_STOP, val);
+}
+
static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr;
+ int ret;
reg_mcr = priv->read(&regs->mcr);
reg_mcr |= FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
/* enable stop request */
- regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
- 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ ret = flexcan_stop_mode_enable_scfw(priv, true);
+ if (ret < 0)
+ return ret;
+ } else {
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ }
return flexcan_low_power_enter_ack(priv);
}
@@ -566,10 +598,17 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr;
+ int ret;
/* remove stop request */
- regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
- 1 << priv->stm.req_bit, 0);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ ret = flexcan_stop_mode_enable_scfw(priv, false);
+ if (ret < 0)
+ return ret;
+ } else {
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 0);
+ }
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
@@ -776,7 +815,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
priv->write(can_id, &priv->tx_mb->can_id);
priv->write(ctrl, &priv->tx_mb->can_ctrl);
@@ -1083,8 +1122,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
handled = IRQ_HANDLED;
- stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
- 0, reg_ctrl << 16);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload, 0,
+ reg_ctrl << 16, NULL);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
@@ -1867,7 +1907,7 @@ static void unregister_flexcandev(struct net_device *dev)
unregister_candev(dev);
}
-static int flexcan_setup_stop_mode(struct platform_device *pdev)
+static int flexcan_setup_stop_mode_gpr(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
@@ -1912,11 +1952,6 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
"gpr %s req_gpr=0x02%x req_bit=%u\n",
gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit);
- device_set_wakeup_capable(&pdev->dev, true);
-
- if (of_property_read_bool(np, "wakeup-source"))
- device_set_wakeup_enable(&pdev->dev, true);
-
return 0;
out_put_node:
@@ -1924,6 +1959,58 @@ out_put_node:
return ret;
}
+static int flexcan_setup_stop_mode_scfw(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv;
+ u8 scu_idx;
+ int ret;
+
+ ret = of_property_read_u8(pdev->dev.of_node, "fsl,scu-index", &scu_idx);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "failed to get scu index\n");
+ return ret;
+ }
+
+ priv = netdev_priv(dev);
+ priv->scu_idx = scu_idx;
+
+ /* imx_scu_get_handle() may return -EPROBE_DEFER to request probe deferral */
+ return imx_scu_get_handle(&priv->sc_ipc_handle);
+}
+
+/* flexcan_setup_stop_mode - Setup stop mode for wakeup
+ *
+ * Return: = 0 if stop mode was set up successfully or is not supported
+ *         < 0 if setting up stop mode failed (may be -EPROBE_DEFER)
+ */
+static int flexcan_setup_stop_mode(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv;
+ int ret;
+
+ priv = netdev_priv(dev);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
+ ret = flexcan_setup_stop_mode_scfw(pdev);
+ else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
+ ret = flexcan_setup_stop_mode_gpr(pdev);
+ else
+ /* return 0 directly if the stop mode feature is not supported */
+ return 0;
+
+ if (ret)
+ return ret;
+
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ if (of_property_read_bool(pdev->dev.of_node, "wakeup-source"))
+ device_set_wakeup_enable(&pdev->dev, true);
+
+ return 0;
+}
+
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
@@ -2054,17 +2141,20 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ err = flexcan_setup_stop_mode(pdev);
+ if (err < 0) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "setup stop mode failed\n");
+ goto failed_setup_stop_mode;
+ }
+
of_can_transceiver(dev);
devm_can_led_init(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
- err = flexcan_setup_stop_mode(pdev);
- if (err)
- dev_dbg(&pdev->dev, "failed to setup stop-mode\n");
- }
-
return 0;
+ failed_setup_stop_mode:
+ unregister_flexcandev(dev);
failed_register:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index f5d94a692576..4a8453290530 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -517,7 +517,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
stats->tx_packets++;
stats->tx_bytes += priv->txdlc[i];
priv->txdlc[i] = 0;
- can_get_echo_skb(dev, i);
+ can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i);
@@ -1448,7 +1448,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* taken.
*/
priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
- can_put_echo_skb(skb, dev, slotindex);
+ can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
* read from the memory
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 86b0e1406a21..5bb957a26bc6 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -629,7 +629,7 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
/* TX IRQ */
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
- stats->tx_bytes += can_get_echo_skb(ndev, 0);
+ stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
}
@@ -922,7 +922,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT);
writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US);
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
/* Start the transmission */
writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 969cedb9b0b6..37e05010ca91 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -778,7 +778,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx);
+ can_put_echo_skb(skb, netdev, can->echo_idx, 0);
/* Move echo index to the next slot */
can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
@@ -1467,7 +1467,7 @@ static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
struct net_device_stats *stats = &can->can.dev->stats;
stats->tx_bytes += dlc;
@@ -1533,7 +1533,7 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
u8 count = ioread32(can->reg_base +
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
index ef7963ff2006..d717bbc9e033 100644
--- a/drivers/net/can/m_can/Makefile
+++ b/drivers/net/can/m_can/Makefile
@@ -7,3 +7,7 @@ obj-$(CONFIG_CAN_M_CAN) += m_can.o
obj-$(CONFIG_CAN_M_CAN_PCI) += m_can_pci.o
obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o
obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o
+
+tcan4x5x-objs :=
+tcan4x5x-objs += tcan4x5x-core.o
+tcan4x5x-objs += tcan4x5x-regmap.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2c9f12401276..3752520a7d4b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -930,7 +930,7 @@ static void m_can_echo_tx_event(struct net_device *dev)
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
stats->tx_packets++;
}
}
@@ -972,7 +972,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
- stats->tx_bytes += can_get_echo_skb(dev, 0);
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
@@ -1483,7 +1483,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -1554,7 +1554,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx);
+ can_put_echo_skb(skb, dev, putidx, 0);
/* Enable TX FIFO element to start transfer */
m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
-
- m_can_clk_stop(cdev);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 24c737c4fc44..b7caec769ddb 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -2,15 +2,8 @@
// SPI to CAN driver for the Texas Instruments TCAN4x5x
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
+#include "tcan4x5x.h"
-#include <linux/regulator/consumer.h>
-#include <linux/gpio/consumer.h>
-
-#include "m_can.h"
-
-#define DEVICE_NAME "tcan4x5x"
#define TCAN4X5X_EXT_CLK_DEF 40000000
#define TCAN4X5X_DEV_ID0 0x00
@@ -88,14 +81,10 @@
#define TCAN4X5X_MRAM_START 0x8000
#define TCAN4X5X_MCAN_OFFSET 0x1000
-#define TCAN4X5X_MAX_REGISTER 0x8fff
#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
#define TCAN4X5X_SET_ALL_INT 0xffffffff
-#define TCAN4X5X_WRITE_CMD (0x61 << 24)
-#define TCAN4X5X_READ_CMD (0x41 << 24)
-
#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6))
#define TCAN4X5X_MODE_SLEEP 0x00
#define TCAN4X5X_MODE_STANDBY BIT(6)
@@ -113,48 +102,12 @@
#define TCAN4X5X_WD_3_S_TIMER BIT(29)
#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29))
-struct tcan4x5x_priv {
- struct m_can_classdev cdev;
-
- struct regmap *regmap;
- struct spi_device *spi;
-
- struct gpio_desc *reset_gpio;
- struct gpio_desc *device_wake_gpio;
- struct gpio_desc *device_state_gpio;
- struct regulator *power;
-};
-
static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
return container_of(cdev, struct tcan4x5x_priv, cdev);
}
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 2,
- .tseg1_max = 31,
- .tseg2_min = 2,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 1,
- .tseg1_max = 32,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
int wake_state = 0;
@@ -191,72 +144,6 @@ static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
return ret;
}
-static int regmap_spi_gather_write(void *context, const void *reg,
- size_t reg_len, const void *val,
- size_t val_len)
-{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
- struct spi_message m;
- u32 addr;
- struct spi_transfer t[2] = {
- { .tx_buf = &addr, .len = reg_len, .cs_change = 0,},
- { .tx_buf = val, .len = val_len, },
- };
-
- addr = TCAN4X5X_WRITE_CMD | (*((u16 *)reg) << 8) | val_len >> 2;
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
-{
- u16 *reg = (u16 *)(data);
- const u32 *val = data + 4;
-
- return regmap_spi_gather_write(context, reg, 4, val, count - 4);
-}
-
-static int regmap_spi_async_write(void *context,
- const void *reg, size_t reg_len,
- const void *val, size_t val_len,
- struct regmap_async *a)
-{
- return -ENOTSUPP;
-}
-
-static struct regmap_async *regmap_spi_async_alloc(void)
-{
- return NULL;
-}
-
-static int tcan4x5x_regmap_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
-
- u32 addr = TCAN4X5X_READ_CMD | (*((u16 *)reg) << 8) | val_size >> 2;
-
- return spi_write_then_read(spi, &addr, reg_size, (u32 *)val, val_size);
-}
-
-static struct regmap_bus tcan4x5x_bus = {
- .write = tcan4x5x_regmap_write,
- .gather_write = regmap_spi_gather_write,
- .async_write = regmap_spi_async_write,
- .async_alloc = regmap_spi_async_alloc,
- .read = tcan4x5x_regmap_read,
- .read_flag_mask = 0x00,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
{
struct tcan4x5x_priv *priv = cdev_to_priv(cdev);
@@ -411,13 +298,6 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
return 0;
}
-static const struct regmap_config tcan4x5x_regmap = {
- .reg_bits = 32,
- .val_bits = 32,
- .cache_type = REGCACHE_NONE,
- .max_register = TCAN4X5X_MAX_REGISTER,
-};
-
static struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
.read_reg = tcan4x5x_read_reg,
@@ -469,24 +349,19 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
mcan_class->is_peripheral = true;
- mcan_class->bit_timing = &tcan4x5x_bittiming_const;
- mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
mcan_class->net->irq = spi->irq;
spi_set_drvdata(spi, priv);
/* Configure the SPI bus */
- spi->bits_per_word = 32;
+ spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret)
goto out_m_can_class_free_dev;
- priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
- &spi->dev, &tcan4x5x_regmap);
- if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
+ ret = tcan4x5x_regmap_init(priv);
+ if (ret)
goto out_m_can_class_free_dev;
- }
ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
@@ -528,23 +403,26 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
}
static const struct of_device_id tcan4x5x_of_match[] = {
- { .compatible = "ti,tcan4x5x", },
- { }
+ {
+ .compatible = "ti,tcan4x5x",
+ }, {
+ /* sentinel */
+ },
};
MODULE_DEVICE_TABLE(of, tcan4x5x_of_match);
static const struct spi_device_id tcan4x5x_id_table[] = {
{
- .name = "tcan4x5x",
- .driver_data = 0,
+ .name = "tcan4x5x",
+ }, {
+ /* sentinel */
},
- { }
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
- .name = DEVICE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
.pm = NULL,
},
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
new file mode 100644
index 000000000000..ca80dbaf7a3f
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
+//
+// Copyright (c) 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2018-2019 Texas Instruments Incorporated
+// http://www.ti.com/
+
+#include "tcan4x5x.h"
+
+#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
+#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
+
+#define TCAN4X5X_MAX_REGISTER 0x8ffc
+
+static int tcan4x5x_regmap_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len,
+ },
+ };
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd.cmd) +
+ sizeof(buf_tx->cmd.addr));
+ tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
+ memcpy(buf_tx->data, val, val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
+{
+ return tcan4x5x_regmap_gather_write(context, data, sizeof(__be32),
+ data + sizeof(__be32),
+ count - sizeof(__be32));
+}
+
+static int tcan4x5x_regmap_read(void *context,
+ const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ struct tcan4x5x_map_buf *buf_rx = &priv->map_buf_rx;
+ struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
+ struct spi_transfer xfer[2] = {
+ {
+ .tx_buf = buf_tx,
+ }
+ };
+ struct spi_message msg;
+ int err;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ memcpy(&buf_tx->cmd, reg_buf, sizeof(buf_tx->cmd.cmd) +
+ sizeof(buf_tx->cmd.addr));
+ tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
+
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = val_buf;
+ xfer[1].len = val_len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len;
+
+ if (TCAN4X5X_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len);
+ }
+
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ if (!(spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX))
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static const struct regmap_range tcan4x5x_reg_table_yes_range[] = {
+ regmap_reg_range(0x0000, 0x002c), /* Device ID and SPI Registers */
+ regmap_reg_range(0x0800, 0x083c), /* Device configuration registers and Interrupt Flags */
+ regmap_reg_range(0x1000, 0x10fc), /* M_CAN */
+ regmap_reg_range(0x8000, 0x87fc), /* MRAM */
+};
+
+static const struct regmap_access_table tcan4x5x_reg_table = {
+ .yes_ranges = tcan4x5x_reg_table_yes_range,
+ .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_yes_range),
+};
+
+static const struct regmap_config tcan4x5x_regmap = {
+ .reg_bits = 24,
+ .reg_stride = 4,
+ .pad_bits = 8,
+ .val_bits = 32,
+ .wr_table = &tcan4x5x_reg_table,
+ .rd_table = &tcan4x5x_reg_table,
+ .max_register = TCAN4X5X_MAX_REGISTER,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = (__force unsigned long)
+ cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_READ),
+ .write_flag_mask = (__force unsigned long)
+ cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_WRITE),
+};
+
+static const struct regmap_bus tcan4x5x_bus = {
+ .write = tcan4x5x_regmap_write,
+ .gather_write = tcan4x5x_regmap_gather_write,
+ .read = tcan4x5x_regmap_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+ .max_raw_read = 256,
+ .max_raw_write = 256,
+};
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv)
+{
+ priv->regmap = devm_regmap_init(&priv->spi->dev, &tcan4x5x_bus,
+ priv->spi, &tcan4x5x_regmap);
+ return PTR_ERR_OR_ZERO(priv->regmap);
+}
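
For orientation (an illustration, not part of the patch): with the regmap_bus above registered by tcan4x5x_regmap_init(), the rest of the driver performs plain regmap calls, and the cmd/addr/len framing as well as the half- versus full-duplex handling stay hidden in the bus callbacks. A minimal sketch, assuming the regmap created above; the helper name is made up:

    #include <linux/regmap.h>

    #include "tcan4x5x.h"

    /* Hypothetical helper: one 32-bit read from the device configuration
     * block and a burst read from the MRAM window, both whitelisted in
     * tcan4x5x_reg_table_yes_range and below .max_raw_read (256 bytes).
     */
    static int tcan4x5x_example_access(struct tcan4x5x_priv *priv)
    {
            unsigned int word;
            u32 burst[16];
            int err;

            err = regmap_read(priv->regmap, 0x0800, &word);
            if (err)
                    return err;

            return regmap_bulk_read(priv->regmap, 0x8000, burst,
                                    ARRAY_SIZE(burst));
    }
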
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
new file mode 100644
index 000000000000..c66da829b795
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
+ *
+ * Copyright (c) 2020 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _TCAN4X5X_H
+#define _TCAN4X5X_H
+
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include "m_can.h"
+
+#define TCAN4X5X_SANITIZE_SPI 1
+
+struct __packed tcan4x5x_buf_cmd {
+ u8 cmd;
+ __be16 addr;
+ u8 len;
+};
+
+struct tcan4x5x_map_buf {
+ struct tcan4x5x_buf_cmd cmd;
+ u8 data[256 * sizeof(u32)];
+} ____cacheline_aligned;
+
+struct tcan4x5x_priv {
+ struct m_can_classdev cdev;
+
+ struct regmap *regmap;
+ struct spi_device *spi;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *device_wake_gpio;
+ struct gpio_desc *device_state_gpio;
+ struct regulator *power;
+
+ struct tcan4x5x_map_buf map_buf_rx;
+ struct tcan4x5x_map_buf map_buf_tx;
+};
+
+static inline void
+tcan4x5x_spi_cmd_set_len(struct tcan4x5x_buf_cmd *cmd, u8 len)
+{
+ /* number of u32 */
+ cmd->len = len >> 2;
+}
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv);
+
+#endif
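
To make the framing concrete (again an illustration, not patch content): the controller expects a one-byte instruction, a big-endian 16-bit address and a length counted in 32-bit words, which is what tcan4x5x_spi_cmd_set_len() stores as len >> 2. A sketch for an 8-byte write to the M_CAN block at address 0x1000:

    #include "tcan4x5x.h"

    static void tcan4x5x_example_cmd(void)
    {
            struct tcan4x5x_buf_cmd cmd = {
                    .cmd  = 0x61,                /* TCAN4X5X_SPI_INSTRUCTION_WRITE >> 24 */
                    .addr = cpu_to_be16(0x1000), /* M_CAN register block */
            };

            tcan4x5x_spi_cmd_set_len(&cmd, 8);   /* .len = 8 >> 2 = 2 words */
            /* Bytes on the wire: 61 10 00 02, followed by the 8 data bytes. */
    }
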
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 5ed00a1558e1..fa32e418eb29 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -270,7 +270,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
- can_put_echo_skb(skb, dev, buf_id);
+ can_put_echo_skb(skb, dev, buf_id, 0);
/* Enable interrupt. */
priv->tx_active |= 1 << buf_id;
@@ -448,7 +448,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
out_8(&regs->cantbsel, mask);
stats->tx_bytes += in_8(&regs->tx.dlr);
stats->tx_packets++;
- can_get_echo_skb(dev, entry->id);
+ can_get_echo_skb(dev, entry->id, NULL);
priv->tx_active &= ~mask;
list_del(pos);
}
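
The mscan hunk above is the first of many conversions below that follow one pattern, so it is worth spelling out once. As used throughout this series, can_put_echo_skb() gains a frame_len argument and can_get_echo_skb() a frame_len_ptr out-parameter so that BQL-capable drivers can account bytes in flight; drivers without BQL support, like mscan, pass 0 and NULL. A sketch of the converted prototypes (illustration, not patch content):

    int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
                         unsigned int idx, unsigned int frame_len);
    unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
                                  unsigned int *frame_len_ptr);

A BQL-aware driver stores can_skb_get_frame_len(skb) at transmit time and reads the value back on TX completion; the mcp251xfd hunks further down show the complete pairing.
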
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 4f9e7ec192aa..92a54a5fd4c5 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -711,7 +711,7 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
struct net_device_stats *stats = &(priv->ndev->stats);
u32 dlc;
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
&priv->regs->ifregs[1].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
@@ -924,7 +924,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
&priv->regs->ifregs[1].data[i / 2]);
}
- can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
+ can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);
/* Set the size of the data. Update if2_mcont */
iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index c5334b0c3038..00847cbaf7b6 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -266,7 +266,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- can_get_echo_skb(priv->ndev, msg->client);
+ can_get_echo_skb(priv->ndev, msg->client, NULL);
/* count bytes of the echo instead of skb */
stats->tx_bytes += cf_len;
@@ -716,7 +716,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&priv->echo_lock, flags);
/* prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, ndev, priv->echo_idx);
+ can_put_echo_skb(skb, ndev, priv->echo_idx, 0);
/* move echo index to the next slot */
priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 8d36101b78e3..29cabc20109e 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
+ tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM
help
Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
+ or RZ/G SoCs.
To compile this driver as a module, choose M here: the module will
be called rcar_can.
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index c803327f8f79..4870c4ea190a 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -386,7 +386,7 @@ static void rcar_can_tx_done(struct net_device *ndev)
stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
RCAR_CAN_FIFO_DEPTH];
priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
- can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
priv->tx_tail++;
netif_wake_queue(ndev);
}
@@ -617,7 +617,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
* the CPU-side pointer for the transmit FIFO to the next
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 2778ed5c61d1..d8d233e62990 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1044,7 +1044,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
stats->tx_packets++;
stats->tx_bytes += priv->tx_len[sent];
priv->tx_len[sent] = 0;
- can_get_echo_skb(ndev, sent);
+ can_get_echo_skb(ndev, sent, NULL);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
@@ -1390,7 +1390,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_head++;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b6a7003c51d2..9e86488ba55f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -318,7 +318,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < cf->len; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
cmd_reg_val |= CMD_AT;
@@ -531,7 +531,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
stats->tx_bytes +=
priv->read_reg(priv, SJA1000_FI) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
}
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 40070c930202..c44f3411e561 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -104,7 +104,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
card->tx.last_bus = priv->index;
++card->tx.pending;
++priv->tx.pending;
- can_put_echo_skb(skb, dev, priv->tx.echo_put);
+ can_put_echo_skb(skb, dev, priv->tx.echo_put, 0);
++priv->tx.echo_put;
if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
priv->tx.echo_put = 0;
@@ -284,7 +284,7 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
- can_get_echo_skb(netdev, priv->tx.echo_get);
+ can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index f9455de94786..c3e020c90111 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -586,7 +586,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -725,7 +725,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 25859d16d06f..f69fb4238a65 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1002,7 +1002,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -1171,7 +1171,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 77129d5f410b..3638b474d86b 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -335,6 +335,8 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
u8 len;
int i, j;
+ netdev_reset_queue(priv->ndev);
+
/* TEF */
tef_ring = priv->tef;
tef_ring->head = 0;
@@ -1249,7 +1251,8 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
{
struct net_device_stats *stats = &priv->ndev->stats;
u32 seq, seq_masked, tef_tail_masked;
@@ -1271,7 +1274,8 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
stats->tx_bytes +=
can_rx_offload_get_echo_skb(&priv->offload,
mcp251xfd_get_tef_tail(priv),
- hw_tef_obj->ts);
+ hw_tef_obj->ts,
+ frame_len_ptr);
stats->tx_packets++;
priv->tef->tail++;
@@ -1308,6 +1312,7 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
const u8 offset, const u8 len)
{
const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
(offset > tx_ring->obj_num ||
@@ -1322,12 +1327,13 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
return regmap_bulk_read(priv->map_rx,
mcp251xfd_get_tef_obj_addr(offset),
hw_tef_obj,
- sizeof(*hw_tef_obj) / sizeof(u32) * len);
+ sizeof(*hw_tef_obj) / val_bytes * len);
}
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ unsigned int total_frame_len = 0;
u8 tef_tail, len, l;
int err, i;
@@ -1349,7 +1355,9 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
}
for (i = 0; i < len; i++) {
- err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
+ unsigned int frame_len;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
/* -EAGAIN means the Sequence Number in the TEF
* doesn't match our tef_tail. This can happen if we
* read the TEF objects too early. Leave loop let the
* interrupt handler call us again.
*/
if (err == -EAGAIN)
goto out_netif_wake_queue;
goto out_netif_wake_queue;
if (err)
return err;
+
+ total_frame_len += frame_len;
}
out_netif_wake_queue:
@@ -1368,13 +1378,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct spi_transfer *last_xfer;
- tx_ring->tail += len;
-
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
- */
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1391,6 +1398,9 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (err)
return err;
+ tx_ring->tail += len;
+ netdev_completed_queue(priv->ndev, len, total_frame_len);
+
err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
@@ -1439,6 +1449,7 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
struct sk_buff *skb)
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ u8 dlc;
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
u32 sid, eid;
@@ -1454,9 +1465,10 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
hw_rx_obj->id);
}
+ dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
+
/* CANFD */
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
- u8 dlc;
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
cfd->flags |= CANFD_ESI;
@@ -1464,17 +1476,17 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
cfd->flags |= CANFD_BRS;
- dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
cfd->len = can_fd_dlc2len(dlc);
} else {
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
cfd->can_id |= CAN_RTR_FLAG;
- cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
- hw_rx_obj->flags));
+ can_frame_set_cc_len((struct can_frame *)cfd, dlc,
+ priv->can.ctrlmode);
}
- memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+ if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
}
static int
@@ -1492,7 +1504,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
else
skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
- if (!cfd) {
+ if (!skb) {
stats->rx_dropped++;
return 0;
}
@@ -1511,12 +1523,13 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
const u8 offset, const u8 len)
{
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
int err;
err = regmap_bulk_read(priv->map_rx,
mcp251xfd_get_rx_obj_addr(ring, offset),
hw_rx_obj,
- len * ring->obj_size / sizeof(u32));
+ len * ring->obj_size / val_bytes);
return err;
}
@@ -1553,10 +1566,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
/* Increment the RX FIFO tail pointer 'len' times in a
* single SPI message.
- */
- ring->tail += len;
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1572,6 +1583,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
last_xfer->cs_change = 1;
if (err)
return err;
+
+ ring->tail += len;
}
return 0;
@@ -2138,6 +2151,7 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
struct mcp251xfd_priv *priv = dev_id;
+ const int val_bytes = regmap_get_val_bytes(priv->map_reg);
irqreturn_t handled = IRQ_NONE;
int err;
@@ -2163,7 +2177,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
&priv->regs_status,
sizeof(priv->regs_status) /
- sizeof(u32));
+ val_bytes);
if (err)
goto out_fail;
@@ -2301,7 +2315,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
union mcp251xfd_tx_obj_load_buf *load_buf;
u8 dlc;
u32 id, flags;
- int offset, len;
+ int len_sanitized = 0, len;
if (cfd->can_id & CAN_EFF_FLAG) {
u32 sid, eid;
@@ -2322,12 +2336,12 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
* harm, only the lower 7 bits will be transferred into the
* TEF object.
*/
- dlc = can_fd_len2dlc(cfd->len);
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
- FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
if (cfd->can_id & CAN_RTR_FLAG)
flags |= MCP251XFD_OBJ_FLAGS_RTR;
+ else
+ len_sanitized = canfd_sanitize_len(cfd->len);
/* CANFD */
if (can_is_canfd_skb(skb)) {
@@ -2338,8 +2352,15 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
if (cfd->flags & CANFD_BRS)
flags |= MCP251XFD_OBJ_FLAGS_BRS;
+
+ dlc = can_fd_len2dlc(cfd->len);
+ } else {
+ dlc = can_get_cc_dlc((struct can_frame *)cfd,
+ priv->can.ctrlmode);
}
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);
+
load_buf = &tx_obj->buf;
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
hw_tx_obj = &load_buf->crc.hw_tx_obj;
@@ -2349,17 +2370,22 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
put_unaligned_le32(id, &hw_tx_obj->id);
put_unaligned_le32(flags, &hw_tx_obj->flags);
- /* Clear data at end of CAN frame */
- offset = round_down(cfd->len, sizeof(u32));
- len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset;
- if (MCP251XFD_SANITIZE_CAN && len)
- memset(hw_tx_obj->data + offset, 0x0, len);
+ /* Copy data */
memcpy(hw_tx_obj->data, cfd->data, cfd->len);
+ /* Clear unused data at end of CAN frame */
+ if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
+ int pad_len;
+
+ pad_len = len_sanitized - cfd->len;
+ if (pad_len)
+ memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
+ }
+
/* Number of bytes to be written into the RAM of the controller */
len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
if (MCP251XFD_SANITIZE_CAN)
- len += round_up(can_fd_dlc2len(dlc), sizeof(u32));
+ len += round_up(len_sanitized, sizeof(u32));
else
len += round_up(cfd->len, sizeof(u32));
@@ -2419,6 +2445,7 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct mcp251xfd_priv *priv = netdev_priv(ndev);
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct mcp251xfd_tx_obj *tx_obj;
+ unsigned int frame_len;
u8 tx_head;
int err;
@@ -2434,10 +2461,12 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
/* Stop queue if we occupy the complete TX FIFO */
tx_head = mcp251xfd_get_tx_head(tx_ring);
tx_ring->head++;
- if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
+ if (mcp251xfd_get_tx_free(tx_ring) == 0)
netif_stop_queue(ndev);
- can_put_echo_skb(skb, ndev, tx_head);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
if (err)
@@ -2886,7 +2915,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
- CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO;
+ CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
+ CAN_CTRLMODE_CC_LEN8_DLC;
priv->ndev = ndev;
priv->spi = spi;
priv->rx_int = rx_int;
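
The mcp251xfd hunks above are the BQL side of that API change. The contract, condensed here as an illustrative sketch (not patch content; helper names are made up, the calls mirror the hunks above): every byte count passed to netdev_sent_queue() must later be matched by netdev_completed_queue(), and netdev_reset_queue() clears the accounting whenever the rings are reinitialized.

    #include <linux/can/skb.h>
    #include <linux/netdevice.h>

    /* TX side, cf. mcp251xfd_start_xmit() above. */
    static void example_bql_tx(struct net_device *ndev, struct sk_buff *skb,
                               u8 tx_head)
    {
            unsigned int frame_len = can_skb_get_frame_len(skb);

            can_put_echo_skb(skb, ndev, tx_head, frame_len);
            netdev_sent_queue(ndev, frame_len);  /* bytes in flight += frame_len */
    }

    /* TX-complete side, cf. mcp251xfd_handle_tefif() above: the per-frame
     * lengths returned through can_rx_offload_get_echo_skb() are summed
     * into total_frame_len and completed in one call.
     */
    static void example_bql_tx_complete(struct net_device *ndev,
                                        unsigned int pkts, unsigned int bytes)
    {
            netdev_completed_queue(ndev, pkts, bytes);
    }
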
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 783b63218b7b..54aa7c25c4de 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -448,7 +448,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
writel(msg_flag_n, priv->base + SUN4I_REG_BUF0_ADDR);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
sun4i_can_write_cmdreg(priv, SUN4I_CMD_SELF_RCV_REQ);
@@ -655,7 +655,7 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
readl(priv->base +
SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index a6850ff0b55b..73245d8836a9 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -513,7 +513,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
- can_put_echo_skb(skb, ndev, mbxno);
+ can_put_echo_skb(skb, ndev, mbxno, 0);
spin_lock_irqsave(&priv->mbx_lock, flags);
--priv->tx_head;
@@ -757,7 +757,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
stamp = hecc_read_stamp(priv, mbxno);
stats->tx_bytes +=
can_rx_offload_get_echo_skb(&priv->offload,
- mbxno, stamp);
+ mbxno, stamp, NULL);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
--priv->tx_tail;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 25eee4466364..18f40eb20360 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -518,7 +518,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -801,7 +801,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev)
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9eed75a4b678..562acbf454fd 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -357,7 +357,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
if (!msg->msg.txdone.status) {
stats->tx_packets++;
stats->tx_bytes += context->len;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
} else {
stats->tx_errors++;
can_free_echo_skb(netdev, context->echo_index);
@@ -783,7 +783,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_jobs);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 0487095e1fd0..a00dc1904415 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -370,7 +370,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- can_get_echo_skb(netdev, hf->echo_id);
+ can_get_echo_skb(netdev, hf->echo_id, NULL);
gs_free_tx_context(txc);
@@ -525,7 +525,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, idx);
+ can_put_echo_skb(skb, netdev, idx, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index e2d58846c40c..2b7efd296758 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -578,7 +578,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
context->priv = priv;
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 480bd2ecb296..dcee8dc828ec 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1151,7 +1151,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 98c016ef0607..59ba7c7beec0 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -594,7 +594,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index df54eb7d4b36..1f649d178010 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -237,7 +237,7 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_bytes += ctx->dlc;
can_led_event(netdev, CAN_LED_EVENT_TX);
- can_get_echo_skb(netdev, ctx->ndx);
+ can_get_echo_skb(netdev, ctx->ndx, NULL);
}
if (urb->status)
@@ -355,7 +355,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;
- can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx, 0);
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
@@ -466,7 +466,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
struct mcba_usb_msg_ka_usb *msg)
{
if (unlikely(priv->usb_ka_first_pass)) {
- netdev_info(priv->netdev, "PIC USB version %hhu.%hhu\n",
+ netdev_info(priv->netdev, "PIC USB version %u.%u\n",
msg->soft_ver_major, msg->soft_ver_minor);
priv->usb_ka_first_pass = false;
@@ -492,7 +492,7 @@ static void mcba_usb_process_ka_can(struct mcba_priv *priv,
struct mcba_usb_msg_ka_can *msg)
{
if (unlikely(priv->can_ka_first_pass)) {
- netdev_info(priv->netdev, "PIC CAN version %hhu.%hhu\n",
+ netdev_info(priv->netdev, "PIC CAN version %u.%u\n",
msg->soft_ver_major, msg->soft_ver_minor);
priv->can_ka_first_pass = false;
@@ -554,7 +554,7 @@ static void mcba_usb_process_rx(struct mcba_priv *priv,
break;
default:
- netdev_warn(priv->netdev, "Unsupported msg (0x%hhX)",
+ netdev_warn(priv->netdev, "Unsupported msg (0x%X)",
msg->cmd_id);
break;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 251835ea15aa..573b11559d73 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -309,7 +309,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}
/* should always release echo skb and corresponding context */
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;
/* do wakeup tx queue in case of success only */
@@ -365,7 +365,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 61631f4fd92a..f347ecc79aef 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
return 0;
}
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
return 0;
}
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 7d92da8954fe..fa403c080871 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -672,7 +672,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
/* update statistics */
up->netdev->stats.tx_packets++;
up->netdev->stats.tx_bytes += dlc;
- can_get_echo_skb(up->netdev, echo_index);
+ can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index);
@@ -1137,7 +1137,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
/* put the skb on can loopback stack */
spin_lock_irqsave(&up->echo_skb_lock, flags);
- can_put_echo_skb(skb, up->netdev, echo_index);
+ can_put_echo_skb(skb, up->netdev, echo_index, 0);
spin_unlock_irqrestore(&up->echo_skb_lock, flags);
/* transmit it */
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 44478304ff46..e8c42430a4fc 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -585,7 +585,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
can_led_event(netdev, CAN_LED_EVENT_TX);
@@ -664,7 +664,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_urbs);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index fa47bab510bb..f9a524c5f6d6 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ u8 len;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ len = cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
- srcstats->tx_bytes += cfd->len;
+ srcstats->tx_bytes += len;
peerstats = &peer->stats;
peerstats->rx_packets++;
- peerstats->rx_bytes += cfd->len;
+ peerstats->rx_bytes += len;
}
out_unlock:
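
The vxcan change above, like the two pcan_usb_fd hunks earlier, enforces the same ownership rule: after netif_rx_ni() the skb belongs to the receive path and may already have been freed, so anything needed for statistics must be snapshotted first. A generic sketch (illustration, not patch content):

    #include <linux/can.h>
    #include <linux/netdevice.h>

    static void example_xmit_stats(struct net_device *dev, struct sk_buff *skb)
    {
            struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
            u8 len = cfd->len;      /* snapshot while the skb is still ours */

            if (netif_rx_ni(skb) == NET_RX_SUCCESS)
                    dev->stats.tx_bytes += len;  /* never touch skb/cfd here */
    }
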
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3f54edee92eb..37fa19c62d73 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -592,9 +592,9 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
(priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
else
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
priv->tx_head++;
@@ -1292,7 +1292,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
while (frames_sent--) {
stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
- priv->tx_max);
+ priv->tx_max, NULL);
priv->tx_tail++;
stats->tx_packets++;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f6a0488589fc..3af373e90806 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -60,6 +60,8 @@ source "drivers/net/dsa/qca/Kconfig"
source "drivers/net/dsa/sja1105/Kconfig"
+source "drivers/net/dsa/xrs700x/Kconfig"
+
config NET_DSA_QCA8K
tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
depends on NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index a84adb140a04..f3598c040994 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -24,3 +24,4 @@ obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
obj-y += sja1105/
+obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 288b5a5c3e0d..23fc7225c8d1 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1374,26 +1374,22 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_phylink_mac_link_up);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct b53_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);
-int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
- if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
+ if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
return -EOPNOTSUPP;
/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
@@ -1404,47 +1400,50 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return -EINVAL;
- if (vlan->vid_end > dev->num_vlans)
+ if (vlan->vid >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, true, ds->vlan_filtering);
return 0;
}
-EXPORT_SYMBOL(b53_vlan_prepare);
-void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
- u16 vid;
+ int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &dev->vlans[vid];
+ err = b53_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
- b53_get_vlan_entry(dev, vid, vl);
+ vl = &dev->vlans[vlan->vid];
- if (vid == 0 && vid == b53_default_pvid(dev))
- untagged = true;
+ b53_get_vlan_entry(dev, vlan->vid, vl);
- vl->members |= BIT(port);
- if (untagged && !dsa_is_cpu_port(ds, port))
- vl->untag |= BIT(port);
- else
- vl->untag &= ~BIT(port);
+ if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
+ untagged = true;
- b53_set_vlan_entry(dev, vid, vl);
- b53_fast_age_vlan(dev, vid);
- }
+ vl->members |= BIT(port);
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag |= BIT(port);
+ else
+ vl->untag &= ~BIT(port);
+
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
if (pvid && !dsa_is_cpu_port(ds, port)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid_end);
- b53_fast_age_vlan(dev, vid);
+ vlan->vid);
+ b53_fast_age_vlan(dev, vlan->vid);
}
+
+ return 0;
}
EXPORT_SYMBOL(b53_vlan_add);
@@ -1454,27 +1453,24 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct b53_vlan *vl;
- u16 vid;
u16 pvid;
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &dev->vlans[vid];
+ vl = &dev->vlans[vlan->vid];
- b53_get_vlan_entry(dev, vid, vl);
+ b53_get_vlan_entry(dev, vlan->vid, vl);
- vl->members &= ~BIT(port);
+ vl->members &= ~BIT(port);
- if (pvid == vid)
- pvid = b53_default_pvid(dev);
+ if (pvid == vlan->vid)
+ pvid = b53_default_pvid(dev);
- if (untagged && !dsa_is_cpu_port(ds, port))
- vl->untag &= ~(BIT(port));
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag &= ~(BIT(port));
- b53_set_vlan_entry(dev, vid, vl);
- b53_fast_age_vlan(dev, vid);
- }
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
b53_fast_age_vlan(dev, pvid);
@@ -1751,8 +1747,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_dump);
-int b53_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct b53_device *priv = ds->priv;
@@ -1762,19 +1758,7 @@ int b53_mdb_prepare(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
- return 0;
-}
-EXPORT_SYMBOL(b53_mdb_prepare);
-
-void b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct b53_device *priv = ds->priv;
- int ret;
-
- ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
- if (ret)
- dev_err(ds->dev, "failed to add MDB entry\n");
+ return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
}
EXPORT_SYMBOL(b53_mdb_add);
@@ -2207,7 +2191,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fast_age = b53_br_fast_age,
.port_egress_floods = b53_br_egress_floods,
.port_vlan_filtering = b53_vlan_filtering,
- .port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
@@ -2215,7 +2198,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fdb_del = b53_fdb_del,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
- .port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
.port_max_mtu = b53_get_max_mtu,
@@ -2459,6 +2441,20 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
+ /* Starfighter 2 */
+ {
+ .chip_id = BCM4908_DEVICE_ID,
+ .dev_name = "BCM4908",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 256,
+ .cpu_port = 8, /* TODO: ports 4, 5, 8 */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
{
.chip_id = BCM7445_DEVICE_ID,
.dev_name = "BCM7445",
@@ -2606,9 +2602,8 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
- ds->configure_vlan_while_not_filtering = true;
ds->untag_bridge_pvid = true;
- dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
+ dev->vlan_enabled = true;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
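
All of the b53 conversions above, and the dsa_loop and hellcreek ones below, track the same switchdev change: VLAN objects now carry a single vid instead of a vid_begin/vid_end range, ->port_vlan_add() returns an error code, and the former ->port_vlan_prepare() validation runs at the top of the add callback. A schematic sketch (struct layout as used by this series; the example_* names are hypothetical):

    /* Single-VID object, replacing the vid_begin/vid_end range: */
    struct switchdev_obj_port_vlan {
            struct switchdev_obj obj;
            u16 flags;      /* BRIDGE_VLAN_INFO_UNTAGGED / _PVID */
            u16 vid;
    };

    /* Converted op shape: validation folded into add, which may fail. */
    static int example_port_vlan_add(struct dsa_switch *ds, int port,
                                     const struct switchdev_obj_port_vlan *vlan)
    {
            int err;

            err = example_vlan_prepare(ds, port, vlan);  /* former .prepare */
            if (err)
                    return err;

            /* ... program the hardware for vlan->vid ... */
            return 0;
    }
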
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 7c67409bb186..0d2cc0453bef 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -64,6 +64,7 @@ struct b53_io_ops {
#define B53_INVALID_LANE 0xff
enum {
+ BCM4908_DEVICE_ID = 0x4908,
BCM5325_DEVICE_ID = 0x25,
BCM5365_DEVICE_ID = 0x65,
BCM5389_DEVICE_ID = 0x89,
@@ -347,12 +348,9 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans);
-int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
@@ -361,10 +359,8 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
-int b53_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-void b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int b53_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 1e9a0adda2d6..1857aa9aa84a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -105,7 +105,8 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_IMP;
else
offset = CORE_STS_OVERRIDE_IMP2;
@@ -509,15 +510,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
- if (!priv->master_mii_bus)
+ if (!priv->master_mii_bus) {
+ of_node_put(dn);
return -EPROBE_DEFER;
+ }
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus)
+ if (!priv->slave_mii_bus) {
+ of_node_put(dn);
return -ENOMEM;
+ }
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
@@ -715,7 +720,8 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
u32 reg, offset;
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
@@ -742,7 +748,8 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
@@ -1113,7 +1120,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.port_stp_state_set = b53_br_set_stp_state,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
- .port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
@@ -1123,7 +1129,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_rxnfc = bcm_sf2_set_rxnfc,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
- .port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
};
@@ -1135,6 +1140,30 @@ struct bcm_sf2_of_data {
unsigned int num_cfp_rules;
};
+static const u16 bcm_sf2_4908_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0c,
+ [REG_SWITCH_REVISION] = 0x10,
+ [REG_PHY_REVISION] = 0x14,
+ [REG_SPHY_CNTRL] = 0x24,
+ [REG_CROSSBAR] = 0xc8,
+ [REG_RGMII_0_CNTRL] = 0xe0,
+ [REG_RGMII_1_CNTRL] = 0xec,
+ [REG_RGMII_2_CNTRL] = 0xf8,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
+ .type = BCM4908_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_4908_reg_offsets,
+ .num_cfp_rules = 0, /* FIXME */
+};
+
/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
[REG_SWITCH_CNTRL] = 0x00,
@@ -1183,6 +1212,9 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
};
static const struct of_device_id bcm_sf2_of_match[] = {
+ { .compatible = "brcm,bcm4908-switch",
+ .data = &bcm_sf2_4908_data
+ },
{ .compatible = "brcm,bcm7445-switch-v4.0",
.data = &bcm_sf2_7445_data
},
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index d82cee5d9202..ed45d16250e1 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -885,18 +885,15 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
return -EINVAL;
vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
- vlan.vid_begin = vid;
- vlan.vid_end = vid;
+ vlan.vid = vid;
if (cpu_to_be32(fs->h_ext.data[1]) & 1)
vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
else
vlan.flags = 0;
- ret = ds->ops->port_vlan_prepare(ds, port_num, &vlan);
+ ret = ds->ops->port_vlan_add(ds, port_num, &vlan);
if (ret)
return ret;
-
- ds->ops->port_vlan_add(ds, port_num, &vlan);
}
/*
@@ -942,8 +939,7 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
return -EINVAL;
if ((fs->flow_type & FLOW_EXT) &&
- !(ds->ops->port_vlan_prepare || ds->ops->port_vlan_add ||
- ds->ops->port_vlan_del))
+ !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
return -EOPNOTSUPP;
if (fs->location != RX_CLS_LOC_ANY &&
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index d8a5e6269c0e..1d2d55c9f8aa 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -17,6 +17,7 @@ enum bcm_sf2_reg_offs {
REG_SWITCH_REVISION,
REG_PHY_REVISION,
REG_SPHY_CNTRL,
+ REG_CROSSBAR,
REG_RGMII_0_CNTRL,
REG_RGMII_1_CNTRL,
REG_RGMII_2_CNTRL,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e38906ae8f23..8c283f59158b 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -190,8 +190,7 @@ static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
}
static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
__func__, port, vlan_filtering);
@@ -199,53 +198,36 @@ static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int
-dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct dsa_loop_priv *ps = ds->priv;
- struct mii_bus *bus = ps->bus;
-
- dev_dbg(ds->dev, "%s: port: %d, vlan: %d-%d",
- __func__, port, vlan->vid_begin, vlan->vid_end);
-
- /* Just do a sleeping operation to make lockdep checks effective */
- mdiobus_read(bus, ps->port_base + port, MII_BMSR);
-
- if (vlan->vid_end > ARRAY_SIZE(ps->vlans))
- return -ERANGE;
-
- return 0;
-}
-
-static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
- u16 vid;
+
+ if (vlan->vid >= ARRAY_SIZE(ps->vlans))
+ return -ERANGE;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &ps->vlans[vid];
+ vl = &ps->vlans[vlan->vid];
- vl->members |= BIT(port);
- if (untagged)
- vl->untagged |= BIT(port);
- else
- vl->untagged &= ~BIT(port);
+ vl->members |= BIT(port);
+ if (untagged)
+ vl->untagged |= BIT(port);
+ else
+ vl->untagged &= ~BIT(port);
- dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
- __func__, port, vid, untagged ? "un" : "", pvid);
- }
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
if (pvid)
- ps->ports[port].pvid = vid;
+ ps->ports[port].pvid = vlan->vid;
+
+ return 0;
}
static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
@@ -253,26 +235,24 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct dsa_loop_priv *ps = ds->priv;
+ u16 pvid = ps->ports[port].pvid;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
- u16 vid, pvid = ps->ports[port].pvid;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &ps->vlans[vid];
+ vl = &ps->vlans[vlan->vid];
- vl->members &= ~BIT(port);
- if (untagged)
- vl->untagged &= ~BIT(port);
+ vl->members &= ~BIT(port);
+ if (untagged)
+ vl->untagged &= ~BIT(port);
- if (pvid == vid)
- pvid = 1;
+ if (pvid == vlan->vid)
+ pvid = 1;
- dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
- __func__, port, vid, untagged ? "un" : "", pvid);
- }
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
ps->ports[port].pvid = pvid;
return 0;
@@ -307,7 +287,6 @@ static const struct dsa_switch_ops dsa_loop_driver = {
.port_bridge_leave = dsa_loop_port_bridge_leave,
.port_stp_state_set = dsa_loop_port_stp_state_set,
.port_vlan_filtering = dsa_loop_port_vlan_filtering,
- .port_vlan_prepare = dsa_loop_port_vlan_prepare,
.port_vlan_add = dsa_loop_port_vlan_add,
.port_vlan_del = dsa_loop_port_vlan_del,
.port_change_mtu = dsa_loop_port_change_mtu,
@@ -344,7 +323,6 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
ds->dev = &mdiodev->dev;
ds->ops = &dsa_loop_driver;
ds->priv = ps;
- ds->configure_vlan_while_not_filtering = true;
ps->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, ds);
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
index 222dd35e2c9d..e01191107a4b 100644
--- a/drivers/net/dsa/hirschmann/Kconfig
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
depends on HAS_IOMEM
depends on NET_DSA
depends on PTP_1588_CLOCK
+ depends on LEDS_CLASS
select NET_DSA_TAG_HELLCREEK
help
This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 6420b76ea37c..4cc51fb37e67 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -3,7 +3,7 @@
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
*
- * Copyright (C) 2019,2020 Linutronix GmbH
+ * Copyright (C) 2019-2021 Linutronix GmbH
* Author Kurt Kanzenbach <kurt@linutronix.de>
*/
@@ -153,6 +153,13 @@ static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid,
hellcreek_write(hellcreek, val, HR_VIDCFG);
}
+static void hellcreek_select_tgd(struct hellcreek *hellcreek, int port)
+{
+ u16 val = port << TR_TGDSEL_TDGSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, TR_TGDSEL);
+}
+
static int hellcreek_wait_until_ready(struct hellcreek *hellcreek)
{
u16 val;
@@ -348,14 +355,12 @@ static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
*/
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
const u16 restricted_vid = hellcreek_private_vid(i);
- u16 vid;
if (!dsa_is_user_port(ds, i))
continue;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (vid == restricted_vid)
- return -EBUSY;
+ if (vlan->vid == restricted_vid)
+ return -EBUSY;
}
return 0;
@@ -440,34 +445,35 @@ static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
mutex_unlock(&hellcreek->reg_lock);
}
-static void hellcreek_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int hellcreek_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct hellcreek *hellcreek = ds->priv;
- u16 vid;
+ int err;
+
+ err = hellcreek_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
- dev_dbg(hellcreek->dev, "Add VLANs (%d -- %d) on port %d, %s, %s\n",
- vlan->vid_begin, vlan->vid_end, port,
- untagged ? "untagged" : "tagged",
+ dev_dbg(hellcreek->dev, "Add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- hellcreek_apply_vlan(hellcreek, port, vid, pvid, untagged);
+ hellcreek_apply_vlan(hellcreek, port, vlan->vid, pvid, untagged);
+
+ return 0;
}
static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct hellcreek *hellcreek = ds->priv;
- u16 vid;
- dev_dbg(hellcreek->dev, "Remove VLANs (%d -- %d) on port %d\n",
- vlan->vid_begin, vlan->vid_end, port);
+ dev_dbg(hellcreek->dev, "Remove VLAN %d on port %d\n", vlan->vid, port);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- hellcreek_unapply_vlan(hellcreek, port, vid);
+ hellcreek_unapply_vlan(hellcreek, port, vlan->vid);
return 0;
}
@@ -866,14 +872,10 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
}
static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
struct hellcreek *hellcreek = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
vlan_filtering ? "Enable" : "Disable", port);
@@ -1038,11 +1040,6 @@ static int hellcreek_setup(struct dsa_switch *ds)
/* Configure PCP <-> TC mapping */
hellcreek_setup_tc_identity_mapping(hellcreek);
- /* Allow VLAN configurations while not filtering which is the default
- * for new DSA drivers.
- */
- ds->configure_vlan_while_not_filtering = true;
-
/* The VLAN awareness is a global switch setting. Therefore, mixed vlan
* filtering setups are not supported.
*/
@@ -1135,6 +1132,296 @@ out:
return ret;
}
+static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ const struct tc_taprio_sched_entry *cur, *initial, *next;
+ size_t i;
+
+ cur = initial = &schedule->entries[0];
+ next = cur + 1;
+
+ for (i = 1; i <= schedule->num_entries; ++i) {
+ u16 data;
+ u8 gates;
+
+ cur++;
+ next++;
+
+ if (i == schedule->num_entries)
+ gates = initial->gate_mask ^
+ cur->gate_mask;
+ else
+ gates = next->gate_mask ^
+ cur->gate_mask;
+
+ data = gates;
+
+ if (i == schedule->num_entries)
+ data |= TR_GCLDAT_GCLWRLAST;
+
+ /* Gates states */
+ hellcreek_write(hellcreek, data, TR_GCLDAT);
+
+ /* Time interval */
+ hellcreek_write(hellcreek,
+ cur->interval & 0x0000ffff,
+ TR_GCLTIL);
+ hellcreek_write(hellcreek,
+ (cur->interval & 0xffff0000) >> 16,
+ TR_GCLTIH);
+
+ /* Commit entry */
+ data = ((i - 1) << TR_GCLCMD_GCLWRADR_SHIFT) |
+ (initial->gate_mask <<
+ TR_GCLCMD_INIT_GATE_STATES_SHIFT);
+ hellcreek_write(hellcreek, data, TR_GCLCMD);
+ }
+}
+
+static void hellcreek_set_cycle_time(struct hellcreek *hellcreek,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ u32 cycle_time = schedule->cycle_time;
+
+ hellcreek_write(hellcreek, cycle_time & 0x0000ffff, TR_CTWRL);
+ hellcreek_write(hellcreek, (cycle_time & 0xffff0000) >> 16, TR_CTWRH);
+}
+
+static void hellcreek_switch_schedule(struct hellcreek *hellcreek,
+ ktime_t start_time)
+{
+ struct timespec64 ts = ktime_to_timespec64(start_time);
+
+ /* Start schedule at this point in time */
+ hellcreek_write(hellcreek, ts.tv_nsec & 0x0000ffff, TR_ESTWRL);
+ hellcreek_write(hellcreek, (ts.tv_nsec & 0xffff0000) >> 16, TR_ESTWRH);
+
+ /* Arm timer, set seconds and switch schedule */
+ hellcreek_write(hellcreek, TR_ESTCMD_ESTARM | TR_ESTCMD_ESTSWCFG |
+ ((ts.tv_sec & TR_ESTCMD_ESTSEC_MASK) <<
+ TR_ESTCMD_ESTSEC_SHIFT), TR_ESTCMD);
+}
+
+static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ s64 base_time_ns, current_ns;
+
+ /* The switch allows a schedule to be started at most eight seconds in
+ * the future. Therefore, check against the current PTP time whether
+ * the schedule is startable.
+ */
+
+ /* Use the "cached" time. That should be alright, as it's updated quite
+ * frequently in the PTP code.
+ */
+ mutex_lock(&hellcreek->ptp_lock);
+ current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Calculate difference to admin base time */
+ base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
+
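+ /* Startable if the base time lies less than eight seconds ahead of
+ * the cached PTP time; base times already in the past are forwarded
+ * by hellcreek_start_schedule().
+ */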
+ return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC;
+}
+
+static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ ktime_t base_time, current_time;
+ s64 current_ns;
+ u32 cycle_time;
+
+ /* First select port */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Forward base time into the future if needed */
+ mutex_lock(&hellcreek->ptp_lock);
+ current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ current_time = ns_to_ktime(current_ns);
+ base_time = hellcreek_port->current_schedule->base_time;
+ cycle_time = hellcreek_port->current_schedule->cycle_time;
+
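+ /* A base time in the past is pushed forward by whole cycles to the
+ * next cycle boundary, e.g. base = 1s, now = 10s and cycle = 2s
+ * yields n = 4 and a new base time of 11s.
+ */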
+ if (ktime_compare(current_time, base_time) > 0) {
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(current_time, base_time),
+ cycle_time);
+ base_time = ktime_add_ns(base_time, (n + 1) * cycle_time);
+ }
+
+ /* Set admin base time and switch schedule */
+ hellcreek_switch_schedule(hellcreek, base_time);
+
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+
+ dev_dbg(hellcreek->dev, "Armed EST timer for port %d\n",
+ hellcreek_port->port);
+}
+
+static void hellcreek_check_schedule(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct hellcreek_port *hellcreek_port;
+ struct hellcreek *hellcreek;
+ bool startable;
+
+ hellcreek_port = dw_to_hellcreek_port(dw);
+ hellcreek = hellcreek_port->hellcreek;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Check starting time */
+ startable = hellcreek_schedule_startable(hellcreek,
+ hellcreek_port->port);
+ if (startable) {
+ hellcreek_start_schedule(hellcreek, hellcreek_port->port);
+ mutex_unlock(&hellcreek->reg_lock);
+ return;
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ /* Reschedule */
+ schedule_delayed_work(&hellcreek_port->schedule_work,
+ HELLCREEK_SCHEDULE_PERIOD);
+}
+
+static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ bool startable;
+ u16 ctrl;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Configure traffic schedule on port %d\n",
+ port);
+
+ /* First cancel delayed work */
+ cancel_delayed_work_sync(&hellcreek_port->schedule_work);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ if (hellcreek_port->current_schedule) {
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+ }
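+ /* Hold a reference on the offload; it is dropped again when the
+ * schedule is started or deleted.
+ */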
+ hellcreek_port->current_schedule = taprio_offload_get(taprio);
+
+ /* Then select port */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Enable gating and keep defaults */
+ ctrl = (0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT) | TR_TGDCTRL_GATE_EN;
+ hellcreek_write(hellcreek, ctrl, TR_TGDCTRL);
+
+ /* Cancel pending schedule */
+ hellcreek_write(hellcreek, 0x00, TR_ESTCMD);
+
+ /* Setup a new schedule */
+ hellcreek_setup_gcl(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Configure cycle time */
+ hellcreek_set_cycle_time(hellcreek, hellcreek_port->current_schedule);
+
+ /* Check starting time */
+ startable = hellcreek_schedule_startable(hellcreek, port);
+ if (startable) {
+ hellcreek_start_schedule(hellcreek, port);
+ mutex_unlock(&hellcreek->reg_lock);
+ return 0;
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ /* Schedule periodic schedule check */
+ schedule_delayed_work(&hellcreek_port->schedule_work,
+ HELLCREEK_SCHEDULE_PERIOD);
+
+ return 0;
+}
+
+static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Remove traffic schedule on port %d\n", port);
+
+ /* First cancel delayed work */
+ cancel_delayed_work_sync(&hellcreek_port->schedule_work);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ if (hellcreek_port->current_schedule) {
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+ }
+
+ /* Then select port */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Disable gating and return to regular switching flow */
+ hellcreek_write(hellcreek, 0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT,
+ TR_TGDCTRL);
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
+ struct tc_taprio_qopt_offload *schedule)
+{
+ size_t i;
+
+ /* Does this hellcreek version support Qbv in hardware? */
+ if (!hellcreek->pdata->qbv_support)
+ return false;
+
+ /* The cycle time is limited to 32 bit */
+ if (schedule->cycle_time > (u32)-1)
+ return false;
+
+ /* cycle time extension is not supported */
+ if (schedule->cycle_time_extension)
+ return false;
+
+ /* Only the "set gates" command is supported */
+ for (i = 0; i < schedule->num_entries; ++i)
+ if (schedule->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return false;
+
+ return true;
+}
+
+static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct hellcreek *hellcreek = ds->priv;
+
+ if (type != TC_SETUP_QDISC_TAPRIO)
+ return -EOPNOTSUPP;
+
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
+
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
+
+ return hellcreek_port_del_schedule(ds, port);
+}
+
static const struct dsa_switch_ops hellcreek_ds_ops = {
.get_ethtool_stats = hellcreek_get_ethtool_stats,
.get_sset_count = hellcreek_get_sset_count,
@@ -1153,12 +1440,12 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.port_hwtstamp_get = hellcreek_port_hwtstamp_get,
.port_prechangeupper = hellcreek_port_prechangeupper,
.port_rxtstamp = hellcreek_port_rxtstamp,
+ .port_setup_tc = hellcreek_port_setup_tc,
.port_stp_state_set = hellcreek_port_stp_state_set,
.port_txtstamp = hellcreek_port_txtstamp,
.port_vlan_add = hellcreek_vlan_add,
.port_vlan_del = hellcreek_vlan_del,
.port_vlan_filtering = hellcreek_vlan_filtering,
- .port_vlan_prepare = hellcreek_vlan_prepare,
.setup = hellcreek_setup,
};
@@ -1208,6 +1495,9 @@ static int hellcreek_probe(struct platform_device *pdev)
port->hellcreek = hellcreek;
port->port = i;
+
+ INIT_DELAYED_WORK(&port->schedule_work,
+ hellcreek_check_schedule);
}
mutex_init(&hellcreek->reg_lock);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index e81781ebc31c..854639f87247 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -3,7 +3,7 @@
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
*
- * Copyright (C) 2019,2020 Linutronix GmbH
+ * Copyright (C) 2019-2021 Linutronix GmbH
* Author Kurt Kanzenbach <kurt@linutronix.de>
*/
@@ -21,6 +21,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <net/dsa.h>
+#include <net/pkt_sched.h>
/* Ports:
* - 0: CPU
@@ -246,6 +247,10 @@ struct hellcreek_port {
/* Per-port timestamping resources */
struct hellcreek_port_hwtstamp port_hwtstamp;
+
+ /* Per-port Qbv schedule information */
+ struct tc_taprio_qopt_offload *current_schedule;
+ struct delayed_work schedule_work;
};
struct hellcreek_fdb_entry {
@@ -283,4 +288,14 @@ struct hellcreek {
size_t fdb_entries;
};
+/* A Qbv schedule can only be started up to 8 seconds in the future. If the
+ * delta between the base time and the current PTP time is larger than 8
+ * seconds, then use periodic work to check whether the schedule can be
+ * started. The delayed work cannot be armed directly to $base_time - 8 + X,
+ * because for large deltas the PTP frequency adjustments matter.
+ */
+#define HELLCREEK_SCHEDULE_PERIOD (2 * HZ)
+#define dw_to_hellcreek_port(dw) \
+ container_of(dw, struct hellcreek_port, schedule_work)
+
#endif /* _HELLCREEK_H_ */
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index aa1142d6a9f5..344374025426 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1232,14 +1232,19 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
+ int err;
+
+ err = lan9303_port_mdb_prepare(ds, port, mdb);
+ if (err)
+ return err;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
mdb->vid);
- lan9303_alr_add_port(chip, mdb->addr, port, false);
+ return lan9303_alr_add_port(chip, mdb->addr, port, false);
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
@@ -1274,7 +1279,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
.port_fdb_add = lan9303_port_fdb_add,
.port_fdb_del = lan9303_port_fdb_del,
.port_fdb_dump = lan9303_port_fdb_dump,
- .port_mdb_prepare = lan9303_port_mdb_prepare,
.port_mdb_add = lan9303_port_mdb_add,
.port_mdb_del = lan9303_port_mdb_del,
};
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 09701c17f3f6..9fec97773a15 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
-#define GSWIP_MII_CFG0 0x00
-#define GSWIP_MII_CFG1 0x02
-#define GSWIP_MII_CFG5 0x04
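+/* One MII_CFG register per port, spaced 0x2 apart */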
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
- break;
- }
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -737,23 +727,14 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
}
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
struct gswip_priv *priv = ds->priv;
/* Do not allow changing the VLAN filtering options while in bridge */
- if (switchdev_trans_ph_prepare(trans)) {
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
-
- if (!bridge)
- return 0;
-
- if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
- return -EIO;
-
- return 0;
- }
+ if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
+ return -EIO;
if (vlan_filtering) {
/* Use port based VLAN tag */
@@ -791,15 +772,8 @@ static int gswip_setup(struct dsa_switch *ds)
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++) {
- struct switchdev_trans trans;
-
- /* Skip the prepare phase, this shouldn't return an error
- * during setup.
- */
- trans.ph_prepare = false;
-
gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false, &trans);
+ gswip_port_vlan_filtering(ds, i, false);
}
/* enable Switch */
@@ -822,9 +796,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -854,6 +827,9 @@ static int gswip_setup(struct dsa_switch *ds)
}
gswip_port_enable(ds, cpu_port, NULL);
+
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1157,56 +1133,55 @@ static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
unsigned int max_ports = priv->hw_info->max_ports;
- u16 vid;
- int i;
int pos = max_ports;
+ int i, idx = -1;
/* We only support VLAN filtering on bridges */
if (!dsa_is_cpu_port(ds, port) && !bridge)
return -EOPNOTSUPP;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- int idx = -1;
+ /* Check if there is already a page for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vlan->vid) {
+ idx = i;
+ break;
+ }
+ }
- /* Check if there is already a page for this VLAN */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- priv->vlans[i].vid == vid) {
- idx = i;
+ /* If this VLAN is not programmed yet, we have to reserve
+ * one entry in the VLAN table. Make sure the search starts
+ * at the next position.
+ */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
break;
}
}
- /* If this VLAN is not programmed yet, we have to reserve
- * one entry in the VLAN table. Make sure we start at the
- * next position round.
- */
- if (idx == -1) {
- /* Look for a free slot */
- for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
- if (!priv->vlans[pos].bridge) {
- idx = pos;
- pos++;
- break;
- }
- }
-
- if (idx == -1)
- return -ENOSPC;
- }
+ if (idx == -1)
+ return -ENOSPC;
}
return 0;
}
-static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
+ int err;
+
+ err = gswip_port_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
@@ -1214,10 +1189,10 @@ static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
* this.
*/
if (dsa_is_cpu_port(ds, port))
- return;
+ return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
+ return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
+ untagged, pvid);
}
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
@@ -1226,8 +1201,6 @@ static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
- int err;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
@@ -1237,13 +1210,7 @@ static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
if (dsa_is_cpu_port(ds, port))
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
- if (err)
- return err;
- }
-
- return 0;
+ return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
}
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
@@ -1447,11 +1414,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
+ /* With the exclusion of MII, Reverse MII and Reduced MII, we
+ * support Gigabit, including Half duplex
*/
if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseT_Half);
}
@@ -1541,9 +1509,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
- /* Enable the xMII interface only for the external PHY */
- if (interface != PHY_INTERFACE_MODE_INTERNAL)
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
@@ -1623,7 +1589,6 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.port_bridge_leave = gswip_port_bridge_leave,
.port_fast_age = gswip_port_fast_age,
.port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_prepare = gswip_port_vlan_prepare,
.port_vlan_add = gswip_port_vlan_add,
.port_vlan_del = gswip_port_vlan_del,
.port_stp_state_set = gswip_port_stp_state_set,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c973db101b72..c87d445b30fd 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -783,55 +783,53 @@ static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct switchdev_trans *trans)
+ bool flag)
{
struct ksz_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
return 0;
}
-static void ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
- u16 data, vid, new_pvid = 0;
+ u16 data, new_pvid = 0;
u8 fid, member, valid;
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ksz8795_r_vlan_table(dev, vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8795_r_vlan_table(dev, vlan->vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
- /* First time to setup the VLAN entry. */
- if (!valid) {
- /* Need to find a way to map VID to FID. */
- fid = 1;
- valid = 1;
- }
- member |= BIT(port);
+ /* First time to setup the VLAN entry. */
+ if (!valid) {
+ /* Need to find a way to map VID to FID. */
+ fid = 1;
+ valid = 1;
+ }
+ member |= BIT(port);
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vid, data);
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vlan->vid, data);
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- new_pvid = vid;
- }
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ new_pvid = vlan->vid;
if (new_pvid) {
+ u16 vid;
+
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
vid &= 0xfff;
vid |= new_pvid;
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
}
+
+ return 0;
}
static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
@@ -839,7 +837,7 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
- u16 data, vid, pvid, new_pvid = 0;
+ u16 data, pvid, new_pvid = 0;
u8 fid, member, valid;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
@@ -847,24 +845,22 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ksz8795_r_vlan_table(dev, vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8795_r_vlan_table(dev, vlan->vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
- member &= ~BIT(port);
+ member &= ~BIT(port);
- /* Invalidate the entry if no more member. */
- if (!member) {
- fid = 0;
- valid = 0;
- }
+ /* Invalidate the entry if no more member. */
+ if (!member) {
+ fid = 0;
+ valid = 0;
+ }
- if (pvid == vid)
- new_pvid = 1;
+ if (pvid == vlan->vid)
+ new_pvid = 1;
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vid, data);
- }
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vlan->vid, data);
if (new_pvid != pvid)
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
@@ -1098,6 +1094,8 @@ static int ksz8795_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1116,11 +1114,9 @@ static const struct dsa_switch_ops ksz8795_switch_ops = {
.port_stp_state_set = ksz8795_port_stp_state_set,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz8795_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
.port_vlan_add = ksz8795_port_vlan_add,
.port_vlan_del = ksz8795_port_vlan_del,
.port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_prepare = ksz_port_mdb_prepare,
.port_mdb_add = ksz_port_mdb_add,
.port_mdb_del = ksz_port_mdb_del,
.port_mirror_add = ksz8795_port_mirror_add,
@@ -1187,6 +1183,20 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
.port_cnt = 5, /* total cpu and user ports */
},
{
+ /*
+ * WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except that the port map
+ * contains a gap between the external and CPU ports, i.e.
+ * the port map is NOT contiguous. The per-port register
+ * map is shifted accordingly, i.e. registers at offset
+ * 0x40 are NOT used on KSZ8794 while they ARE used on
+ * KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ */
.chip_id = 0x8794,
.dev_name = "KSZ8794",
.num_vlans = 4096,
@@ -1220,9 +1230,13 @@ static int ksz8795_switch_init(struct ksz_device *dev)
dev->num_vlans = chip->num_vlans;
dev->num_alus = chip->num_alus;
dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
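+ /* The CPU port is always the highest port, so derive the
+ * port counts and masks from its bit position.
+ */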
+ dev->port_cnt = fls(chip->cpu_ports);
+ dev->cpu_port = fls(chip->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->port_cnt - 1;
dev->cpu_ports = chip->cpu_ports;
-
+ dev->host_mask = chip->cpu_ports;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
+ chip->cpu_ports;
break;
}
}
@@ -1231,17 +1245,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
if (!dev->cpu_ports)
return -ENODEV;
- dev->port_mask = BIT(dev->port_cnt) - 1;
- dev->port_mask |= dev->host_mask;
-
dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
dev->mib_cnt = ARRAY_SIZE(mib_names);
- dev->phy_port_cnt = dev->port_cnt - 1;
-
- dev->cpu_port = dev->port_cnt - 1;
- dev->host_mask = BIT(dev->cpu_port);
-
dev->ports = devm_kzalloc(dev->dev,
dev->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 42e647b67abd..00e38c8e0d01 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -493,14 +493,10 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct switchdev_trans *trans)
+ bool flag)
{
struct ksz_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -514,38 +510,40 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
- u16 vid;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return;
- }
-
- vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
- if (untagged)
- vlan_table[1] |= BIT(port);
- else
- vlan_table[1] &= ~BIT(port);
- vlan_table[1] &= ~(BIT(dev->cpu_port));
+ err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return err;
+ }
- vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+ vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
+ if (untagged)
+ vlan_table[1] |= BIT(port);
+ else
+ vlan_table[1] &= ~BIT(port);
+ vlan_table[1] &= ~(BIT(dev->cpu_port));
- if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return;
- }
+ vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
+ err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return err;
}
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
+
+ return 0;
}
static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
@@ -554,30 +552,27 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
- u16 vid;
u16 pvid;
ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
pvid = pvid & 0xFFF;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return -ETIMEDOUT;
- }
+ if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return -ETIMEDOUT;
+ }
- vlan_table[2] &= ~BIT(port);
+ vlan_table[2] &= ~BIT(port);
- if (pvid == vid)
- pvid = 1;
+ if (pvid == vlan->vid)
+ pvid = 1;
- if (untagged)
- vlan_table[1] &= ~BIT(port);
+ if (untagged)
+ vlan_table[1] &= ~BIT(port);
- if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return -ETIMEDOUT;
- }
+ if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return -ETIMEDOUT;
}
ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
@@ -784,14 +779,15 @@ exit:
return ret;
}
-static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
u32 data;
int index;
u32 mac_hi, mac_lo;
+ int err = 0;
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
@@ -806,7 +802,8 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
- if (ksz9477_wait_alu_sta_ready(dev)) {
+ err = ksz9477_wait_alu_sta_ready(dev);
+ if (err) {
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
goto exit;
}
@@ -829,8 +826,10 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->num_statics) {
+ err = -ENOSPC;
goto exit;
+ }
/* add entry */
static_table[0] = ALU_V_STATIC_VALID;
@@ -852,6 +851,7 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
exit:
mutex_unlock(&dev->alu_mutex);
+ return err;
}
static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
@@ -1381,6 +1381,8 @@ static int ksz9477_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1399,13 +1401,11 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_stp_state_set = ksz9477_port_stp_state_set,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
.port_vlan_add = ksz9477_port_vlan_add,
.port_vlan_del = ksz9477_port_vlan_del,
.port_fdb_dump = ksz9477_port_fdb_dump,
.port_fdb_add = ksz9477_port_fdb_add,
.port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_prepare = ksz_port_mdb_prepare,
.port_mdb_add = ksz9477_port_mdb_add,
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index cf743133b0b9..a7e5ac60baef 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -213,15 +213,6 @@ void ksz_port_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- /* nothing needed */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
-
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data)
{
@@ -253,16 +244,8 @@ int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- /* nothing to do */
- return 0;
-}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
-
-void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -284,7 +267,7 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
/* no available entry */
if (index == dev->num_statics && !empty)
- return;
+ return -ENOSPC;
/* add entry */
if (index == dev->num_statics) {
@@ -301,6 +284,8 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
alu.fid = mdb->vid;
}
dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
@@ -400,7 +385,7 @@ int ksz_switch_register(struct ksz_device *dev,
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
- usleep_range(100, 1000);
+ msleep(100);
}
mutex_init(&dev->dev_mutex);
@@ -434,7 +419,7 @@ int ksz_switch_register(struct ksz_device *dev,
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (port_num >= dev->port_cnt)
+ if (!(dev->port_mask & BIT(port_num)))
return -EINVAL;
of_get_phy_mode(port,
&dev->ports[port_num].interface);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 720f22275c84..f212775372ce 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -161,14 +161,10 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
-int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a67cac15a724..eb13ba79dd01 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -18,6 +18,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <net/dsa.h>
#include "mt7530.h"
@@ -1376,12 +1377,8 @@ mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1397,15 +1394,6 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int
-mt7530_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- /* nothing needed */
-
- return 0;
-}
-
static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
@@ -1493,7 +1481,7 @@ mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
}
-static void
+static int
mt7530_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -1501,23 +1489,21 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mt7530_hw_vlan_entry new_entry;
struct mt7530_priv *priv = ds->priv;
- u16 vid;
mutex_lock(&priv->reg_mutex);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
- mt7530_hw_vlan_update(priv, vid, &new_entry,
- mt7530_hw_vlan_add);
- }
+ mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+ mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
if (pvid) {
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
- G0_PORT_VID(vlan->vid_end));
- priv->ports[port].pvid = vlan->vid_end;
+ G0_PORT_VID(vlan->vid));
+ priv->ports[port].pvid = vlan->vid;
}
mutex_unlock(&priv->reg_mutex);
+
+ return 0;
}
static int
@@ -1526,22 +1512,20 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
{
struct mt7530_hw_vlan_entry target_entry;
struct mt7530_priv *priv = ds->priv;
- u16 vid, pvid;
+ u16 pvid;
mutex_lock(&priv->reg_mutex);
pvid = priv->ports[port].pvid;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- mt7530_hw_vlan_entry_init(&target_entry, port, 0);
- mt7530_hw_vlan_update(priv, vid, &target_entry,
- mt7530_hw_vlan_del);
+ mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+ mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
+ mt7530_hw_vlan_del);
- /* PVID is being restored to the default whenever the PVID port
- * is being removed from the VLAN.
- */
- if (pvid == vid)
- pvid = G0_PORT_VID_DEF;
- }
+ /* PVID is being restored to the default whenever the PVID port
+ * is being removed from the VLAN.
+ */
+ if (pvid == vlan->vid)
+ pvid = G0_PORT_VID_DEF;
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
priv->ports[port].pvid = pvid;
@@ -1639,6 +1623,109 @@ mtk_get_tag_protocol(struct dsa_switch *ds, int port,
}
}
+static inline u32
+mt7530_gpio_to_bit(unsigned int offset)
+{
+ /* Map GPIO offset to register bit
+ * [ 2: 0] port 0 LED 0..2 as GPIO 0..2
+ * [ 6: 4] port 1 LED 0..2 as GPIO 3..5
+ * [10: 8] port 2 LED 0..2 as GPIO 6..8
+ * [14:12] port 3 LED 0..2 as GPIO 9..11
+ * [18:16] port 4 LED 0..2 as GPIO 12..14
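+ * e.g. GPIO 4 (port 1, LED 1) maps to register bit 5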
+ */
+ return BIT(offset + offset / 3);
+}
+
+static int
+mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
+}
+
+static void
+mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ if (value)
+ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
+ else
+ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+}
+
+static int
+mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int
+mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ mt7530_clear(priv, MT7530_LED_GPIO_OE, bit);
+ mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit);
+
+ return 0;
+}
+
+static int
+mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ mt7530_set(priv, MT7530_LED_GPIO_DIR, bit);
+
+ if (value)
+ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
+ else
+ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ mt7530_set(priv, MT7530_LED_GPIO_OE, bit);
+
+ return 0;
+}
+
+static int
+mt7530_setup_gpio(struct mt7530_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct gpio_chip *gc;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
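+ /* Reset the LED pins to GPIO inputs: output disabled, direction
+ * input, pin function switched from PHY-LED to GPIO mode.
+ */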
+ mt7530_write(priv, MT7530_LED_GPIO_OE, 0);
+ mt7530_write(priv, MT7530_LED_GPIO_DIR, 0);
+ mt7530_write(priv, MT7530_LED_IO_MODE, 0);
+
+ gc->label = "mt7530";
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->get_direction = mt7530_gpio_get_direction;
+ gc->direction_input = mt7530_gpio_direction_input;
+ gc->direction_output = mt7530_gpio_direction_output;
+ gc->get = mt7530_gpio_get;
+ gc->set = mt7530_gpio_set;
+ gc->base = -1;
+ gc->ngpio = 15;
+ gc->can_sleep = true;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+
static int
mt7530_setup(struct dsa_switch *ds)
{
@@ -1656,7 +1743,6 @@ mt7530_setup(struct dsa_switch *ds)
* as two netdev instances.
*/
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
- ds->configure_vlan_while_not_filtering = true;
ds->mtu_enforcement_ingress = true;
if (priv->id == ID_MT7530) {
@@ -1781,6 +1867,12 @@ mt7530_setup(struct dsa_switch *ds)
}
}
+ if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) {
+ ret = mt7530_setup_gpio(priv);
+ if (ret)
+ return ret;
+ }
+
mt7530_setup_port5(ds, interface);
/* Flush the FDB table */
@@ -1895,7 +1987,6 @@ mt7531_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
- ds->configure_vlan_while_not_filtering = true;
ds->mtu_enforcement_ingress = true;
/* Flush the FDB table */
@@ -2618,7 +2709,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_fdb_del = mt7530_port_fdb_del,
.port_fdb_dump = mt7530_port_fdb_dump,
.port_vlan_filtering = mt7530_port_vlan_filtering,
- .port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 32d8969b3ace..64a9bb377e15 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -554,6 +554,26 @@ enum mt7531_clk_skew {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
+/* Registers for LED GPIO control (MT7530 only)
+ * All registers follow this pattern:
+ * [ 2: 0] port 0
+ * [ 6: 4] port 1
+ * [10: 8] port 2
+ * [14:12] port 3
+ * [18:16] port 4
+ */
+
+/* LED enable, 0: Disable, 1: Enable (Default) */
+#define MT7530_LED_EN 0x7d00
+/* LED mode, 0: GPIO mode, 1: PHY mode (Default) */
+#define MT7530_LED_IO_MODE 0x7d04
+/* GPIO direction, 0: Input, 1: Output */
+#define MT7530_LED_GPIO_DIR 0x7d10
+/* GPIO output enable, 0: Disable, 1: Enable */
+#define MT7530_LED_GPIO_OE 0x7d14
+/* GPIO value, 0: Low, 1: High */
+#define MT7530_LED_GPIO_DATA 0x7d18
+
#define MT7530_CREV 0x7ffc
#define CHIP_NAME_SHIFT 16
#define MT7530_ID 0x7530
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 51185e4d7d15..05af632b0f59 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -9,23 +9,10 @@ config NET_DSA_MV88E6XXX
This driver adds support for most of the Marvell 88E6xxx models of
Ethernet switch chips, except 88E6060.
-config NET_DSA_MV88E6XXX_GLOBAL2
- bool "Switch Global 2 Registers support"
- default y
- depends on NET_DSA_MV88E6XXX
- help
- This registers set at internal SMI address 0x1C provides extended
- features like EEPROM interface, trunking, cross-chip setup, etc.
-
- It is required on most chips. If the chip you compile the support for
- doesn't have such registers set, say N here. In doubt, say Y.
-
config NET_DSA_MV88E6XXX_PTP
bool "PTP support for Marvell 88E6xxx"
default n
- depends on NET_DSA_MV88E6XXX_GLOBAL2
depends on PTP_1588_CLOCK
- imply NETWORK_PHY_TIMESTAMPING
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 4b080b448ce7..c8eca2b6f959 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -5,9 +5,9 @@ mv88e6xxx-objs += devlink.o
mv88e6xxx-objs += global1.o
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
-mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
-mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_avb.o
-mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_scratch.o
+mv88e6xxx-objs += global2.o
+mv88e6xxx-objs += global2_avb.o
+mv88e6xxx-objs += global2_scratch.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index eafe6bedc692..b99f27b8c084 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1396,15 +1396,32 @@ static int mv88e6xxx_mac_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
+ struct dsa_switch_tree *dst = chip->ds->dst;
+ struct dsa_switch *ds;
+ struct dsa_port *dp;
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
return 0;
/* Skip the local source device, which uses in-chip port VLAN */
- if (dev != chip->ds->index)
+ if (dev != chip->ds->index) {
pvlan = mv88e6xxx_port_vlan(chip, dev, port);
+ ds = dsa_switch_find(dst->index, dev);
+ dp = ds ? dsa_to_port(ds, port) : NULL;
+ if (dp && dp->lag_dev) {
+ /* As the PVT is used to limit flooding of
+ * FORWARD frames, which use the LAG ID as the
+ * source port, we must translate dev/port to
+ * the special "LAG device" in the PVT, using
+ * the LAG ID as the port number.
+ */
+ dev = MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK;
+ port = dsa_lag_id(dst, dp->lag_dev);
+ }
+ }
+
return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
}
@@ -1529,72 +1546,69 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
- u16 vid_begin, u16 vid_end)
+ u16 vid)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
int i, err;
+ if (!vid)
+ return -EOPNOTSUPP;
+
/* DSA and CPU ports have to be members of multiple vlans */
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
return 0;
- if (!vid_begin)
- return -EOPNOTSUPP;
-
- vlan.vid = vid_begin - 1;
+ vlan.vid = vid - 1;
vlan.valid = false;
- do {
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
- if (err)
- return err;
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ return err;
- if (!vlan.valid)
- break;
+ if (!vlan.valid)
+ return 0;
- if (vlan.vid > vid_end)
- break;
+ if (vlan.vid != vid)
+ return 0;
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
- continue;
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
+ continue;
- if (!dsa_to_port(ds, i)->slave)
- continue;
+ if (!dsa_to_port(ds, i)->slave)
+ continue;
- if (vlan.member[i] ==
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
- continue;
+ if (vlan.member[i] ==
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
- if (dsa_to_port(ds, i)->bridge_dev ==
- dsa_to_port(ds, port)->bridge_dev)
- break; /* same bridge, check next VLAN */
+ if (dsa_to_port(ds, i)->bridge_dev ==
+ dsa_to_port(ds, port)->bridge_dev)
+ break; /* same bridge, check next VLAN */
- if (!dsa_to_port(ds, i)->bridge_dev)
- continue;
+ if (!dsa_to_port(ds, i)->bridge_dev)
+ continue;
- dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
- port, vlan.vid, i,
- netdev_name(dsa_to_port(ds, i)->bridge_dev));
- return -EOPNOTSUPP;
- }
- } while (vlan.vid < vid_end);
+ dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
+ port, vlan.vid, i,
+ netdev_name(dsa_to_port(ds, i)->bridge_dev));
+ return -EOPNOTSUPP;
+ }
return 0;
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
struct mv88e6xxx_chip *chip = ds->priv;
u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return mv88e6xxx_max_vid(chip) ? 0 : -EOPNOTSUPP;
+ if (!mv88e6xxx_max_vid(chip))
+ return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
@@ -1617,13 +1631,9 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
* members, do not support it (yet) and fallback to software VLAN.
*/
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
- vlan->vid_end);
+ err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid);
mv88e6xxx_reg_unlock(chip);
- /* We don't need any dynamic resource from the kernel (yet),
- * so skip the prepare phase.
- */
return err;
}
@@ -1923,9 +1933,6 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- if (!vid)
- return -EOPNOTSUPP;
-
vlan.vid = vid - 1;
vlan.valid = false;
@@ -1970,18 +1977,19 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
bool warn;
u8 member;
- u16 vid;
+ int err;
- if (!mv88e6xxx_max_vid(chip))
- return;
+ err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED;
@@ -1997,16 +2005,25 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (mv88e6xxx_port_vlan_join(chip, port, vid, member, warn))
- dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
- vid, untagged ? 'u' : 't');
-
- if (pvid && mv88e6xxx_port_set_pvid(chip, port, vlan->vid_end))
- dev_err(ds->dev, "p%d: failed to set PVID %d\n", port,
- vlan->vid_end);
+ err = mv88e6xxx_port_vlan_join(chip, port, vlan->vid, member, warn);
+ if (err) {
+ dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
+ vlan->vid, untagged ? 'u' : 't');
+ goto out;
+ }
+ if (pvid) {
+ err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid);
+ if (err) {
+ dev_err(ds->dev, "p%d: failed to set PVID %d\n",
+ port, vlan->vid);
+ goto out;
+ }
+ }
+out:
mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
@@ -2055,8 +2072,8 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u16 pvid, vid;
int err = 0;
+ u16 pvid;
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
@@ -2067,16 +2084,14 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (err)
goto unlock;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = mv88e6xxx_port_vlan_leave(chip, port, vid);
+ err = mv88e6xxx_port_vlan_leave(chip, port, vlan->vid);
+ if (err)
+ goto unlock;
+
+ if (vlan->vid == pvid) {
+ err = mv88e6xxx_port_set_pvid(chip, port, 0);
if (err)
goto unlock;
-
- if (vid == pvid) {
- err = mv88e6xxx_port_set_pvid(chip, port, 0);
- if (err)
- goto unlock;
- }
}
unlock:
@@ -2860,7 +2875,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
- ds->configure_vlan_while_not_filtering = true;
mv88e6xxx_reg_lock(chip);
@@ -4035,8 +4049,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6250_g1_reset,
- .vtu_getnext = mv88e6250_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6250_ptp_ops,
.phylink_validate = mv88e6065_phylink_validate,
@@ -5213,10 +5227,6 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
/* Update the compatible info with the probed one */
chip->info = info;
- err = mv88e6xxx_g2_require(chip);
- if (err)
- return err;
-
dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
chip->info->prod_num, chip->info->name, rev);
@@ -5249,27 +5259,18 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->info->tag_protocol;
}
-static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- /* We don't need any dynamic resource from the kernel (yet),
- * so skip the prepare phase.
- */
-
- return 0;
-}
-
-static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
- MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC))
- dev_err(ds->dev, "p%d: failed to load multicast MAC address\n",
- port);
+ err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
+ MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
@@ -5375,6 +5376,275 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
return err;
}
+static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ int id, members = 0;
+
+ if (!mv88e6xxx_has_lag(chip))
+ return false;
+
+ id = dsa_lag_id(ds->dst, lag);
+ if (id < 0 || id >= ds->num_lag_ids)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ /* Includes the port joining the LAG */
+ members++;
+
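+ /* The trunk mask registers provide eight hash buckets, so at
+ * most eight ports can be members of an offloaded LAG.
+ */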
+ if (members > 8)
+ return false;
+
+ /* We could potentially relax this to include active
+ * backup in the future.
+ */
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ /* Ideally we would also validate that the hash type matches
+ * the hardware. Alas, this is always set to unknown on team
+ * interfaces.
+ */
+ return true;
+}
+
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ u16 map = 0;
+ int id;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ /* Build the map of all ports to distribute flows destined for
+ * this LAG. This can be either a local user port, or a DSA
+ * port if the LAG port is on a remote chip.
+ */
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
+
+ return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
+}
+
+static const u8 mv88e6xxx_lag_mask_table[8][8] = {
+ /* Row number corresponds to the number of active members in a
+ * LAG. Column n states which of the eight hash buckets are
+ * mapped to the nth port in the LAG.
+ *
+ * Example: In a LAG with three active ports, the second port
+ * ([2][1]) would be selected for traffic mapped to buckets
+ * 3,4,5 (0x38).
+ */
+ { 0xff, 0, 0, 0, 0, 0, 0, 0 },
+ { 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
+ { 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
+ { 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
+ { 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
+ { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
+};
+
+static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
+ int num_tx, int nth)
+{
+ u8 active = 0;
+ int i;
+
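+ /* Set this port's bit in every hash-bucket mask that the table
+ * above assigns to the nth active transmitter.
+ */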
+ num_tx = num_tx <= 8 ? num_tx : 8;
+ if (nth < num_tx)
+ active = mv88e6xxx_lag_mask_table[num_tx - 1][nth];
+
+ for (i = 0; i < 8; i++) {
+ if (BIT(i) & active)
+ mask[i] |= BIT(port);
+ }
+}
+
+static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ unsigned int id, num_tx;
+ struct net_device *lag;
+ struct dsa_port *dp;
+ int i, err, nth;
+ u16 mask[8];
+ u16 ivec;
+
+ /* Assume no port is a member of any LAG. */
+ ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
+
+ /* Disable all masks for ports that _are_ members of a LAG. */
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (!dp->lag_dev || dp->ds != ds)
+ continue;
+
+ ivec &= ~BIT(dp->index);
+ }
+
+ for (i = 0; i < 8; i++)
+ mask[i] = ivec;
+
+ /* Enable the correct subset of masks for all LAG ports that
+ * are in the Tx set.
+ */
+ dsa_lags_foreach_id(id, ds->dst) {
+ lag = dsa_lag_dev(ds->dst, id);
+ if (!lag)
+ continue;
+
+ num_tx = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (dp->lag_tx_enabled)
+ num_tx++;
+ }
+
+ if (!num_tx)
+ continue;
+
+ nth = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (!dp->lag_tx_enabled)
+ continue;
+
+ if (dp->ds == ds)
+ mv88e6xxx_lag_set_port_mask(mask, dp->index,
+ num_tx, nth);
+
+ nth++;
+ }
+ }
+
+ for (i = 0; i < 8; i++) {
+ err = mv88e6xxx_g2_trunk_mask_write(chip, i, true, mask[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
+ struct net_device *lag)
+{
+ int err;
+
+ err = mv88e6xxx_lag_sync_masks(ds);
+
+ if (!err)
+ err = mv88e6xxx_lag_sync_map(ds, lag);
+
+ return err;
+}
+
+static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err, id;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_port_set_trunk(chip, port, true, id);
+ if (err)
+ goto err_unlock;
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto err_clear_trunk;
+
+ mv88e6xxx_reg_unlock(chip);
+ return 0;
+
+err_clear_trunk:
+ mv88e6xxx_port_set_trunk(chip, port, false, 0);
+err_unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_trunk;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
+ mv88e6xxx_reg_unlock(chip);
+ return err_sync ? : err_trunk;
+}
+
+static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
+ int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pvt_map(chip, sw_index, port);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_pvt;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
+ mv88e6xxx_reg_unlock(chip);
+ return err_sync ? : err_pvt;
+}
+
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
@@ -5408,13 +5678,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
- .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
- .port_mdb_prepare = mv88e6xxx_port_mdb_prepare,
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
@@ -5429,6 +5697,12 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.devlink_param_get = mv88e6xxx_devlink_param_get,
.devlink_param_set = mv88e6xxx_devlink_param_set,
.devlink_info_get = mv88e6xxx_devlink_info_get,
+ .port_lag_change = mv88e6xxx_port_lag_change,
+ .port_lag_join = mv88e6xxx_port_lag_join,
+ .port_lag_leave = mv88e6xxx_port_lag_leave,
+ .crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
+ .crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
+ .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -5448,6 +5722,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
+ /* Some chips support up to 32, but that requires enabling the
+ * 5-bit port mode, which we do not support. 640k^W16 ought to
+ * be enough for anyone.
+ */
+ ds->num_lag_ids = mv88e6xxx_has_lag(chip) ? 16 : 0;
+
dev_set_drvdata(dev, ds);
return dsa_register_switch(ds);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 3543055bcb51..788b3f585ef3 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -662,6 +662,11 @@ static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
return chip->info->pvt;
}
+static inline bool mv88e6xxx_has_lag(struct mv88e6xxx_chip *chip)
+{
+ return !!chip->info->global2_addr;
+}
+
static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
{
return chip->info->num_databases;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 80a182c5b98a..7c396964d0b2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -336,10 +336,6 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
-int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry);
-int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 66ddf67b8737..ae12c981923e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -336,35 +336,6 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g1_vtu_vid_read(chip, entry);
}
-int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 val;
- int err;
-
- err = mv88e6xxx_g1_vtu_getnext(chip, entry);
- if (err)
- return err;
-
- if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
- if (err)
- return err;
-
- /* VTU DBNum[3:0] are located in VTU Operation 3:0
- * VTU DBNum[5:4] are located in VTU Operation 9:8
- */
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
- if (err)
- return err;
-
- entry->fid = val & 0x000f;
- entry->fid |= (val & 0x0300) >> 4;
- }
-
- return 0;
-}
-
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -385,7 +356,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
/* VTU DBNum[3:0] are located in VTU Operation 3:0
- * VTU DBNum[7:4] are located in VTU Operation 11:8
+ * VTU DBNum[7:4] ([5:4] for 6250) are located in VTU Operation 11:8 (9:8)
*/
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
if (err)
@@ -393,6 +364,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
entry->fid = val & 0x000f;
entry->fid |= (val & 0x0f00) >> 4;
+ entry->fid &= mv88e6xxx_num_databases(chip) - 1;
}
return 0;
@@ -462,35 +434,6 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return 0;
}
-int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 op = MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE;
- int err;
-
- err = mv88e6xxx_g1_vtu_op_wait(chip);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
- if (err)
- return err;
-
- if (entry->valid) {
- err = mv88e6185_g1_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- /* VTU DBNum[3:0] are located in VTU Operation 3:0
- * VTU DBNum[5:4] are located in VTU Operation 9:8
- */
- op |= entry->fid & 0x000f;
- op |= (entry->fid & 0x0030) << 4;
- }
-
- return mv88e6xxx_g1_vtu_op(chip, op);
-}
-
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -512,6 +455,10 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] are located in VTU Operation 11:8
+ *
+ * For the 6250/6220, the latter are really [5:4] and
+ * 9:8, but in those cases bits 7:6 of entry->fid are
+ * 0 since they have num_databases = 64.
*/
op |= entry->fid & 0x000f;
op |= (entry->fid & 0x00f0) << 4;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 75b227d0f73b..da8bac8813e1 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -126,8 +126,8 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
/* Offset 0x07: Trunk Mask Table register */
-static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
- bool hash, u16 mask)
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask)
{
u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
@@ -140,8 +140,8 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
/* Offset 0x08: Trunk Mapping Table register */
-static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
- u16 map)
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map)
{
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 1f42ee656816..4127f82275ad 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -101,6 +101,7 @@
#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
+#define MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK 0x1f
/* Offset 0x0C: Cross-chip Port VLAN Data Register */
#define MV88E6XXX_G2_PVT_DATA 0x0c
@@ -295,13 +296,6 @@
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ 2
-#ifdef CONFIG_NET_DSA_MV88E6XXX_GLOBAL2
-
-static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
-{
- return 0;
-}
-
int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
@@ -345,6 +339,10 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask);
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map);
int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
@@ -365,179 +363,4 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
-#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
-
-static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
-{
- if (chip->info->global2_addr) {
- dev_err(chip->dev, "this chip requires CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 enabled\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static inline int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip,
- int reg, int bit, int val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip,
- int port)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip,
- int port)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip,
- u8 *addr)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
- int src_dev, int src_port, u16 data)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
-{
-}
-
-static inline int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus)
-{
- return 0;
-}
-
-static inline void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus)
-{
-}
-
-static inline int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
-static const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {};
-static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
-
-static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {};
-static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {};
-static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {};
-
-static const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {};
-
-static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
- bool external)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
- int target, int port)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip,
- u16 kind, u16 bin)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip,
- u16 *stats)
-{
- return -EOPNOTSUPP;
-}
-
-#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
-
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 77a5fd1798cd..4b46e10a2dde 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -851,6 +851,27 @@ int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
}
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK;
+
+ if (trunk)
+ val |= MV88E6XXX_PORT_CTL1_TRUNK_PORT |
+ (id << MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT);
+ else
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
+}
+
/* Offset 0x06: Port Based VLAN Map */
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 500e1d4896ff..a729bba050df 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -168,6 +168,9 @@
/* Offset 0x05: Port Control 1 */
#define MV88E6XXX_PORT_CTL1 0x05
#define MV88E6XXX_PORT_CTL1_MESSAGE_PORT 0x8000
+#define MV88E6XXX_PORT_CTL1_TRUNK_PORT 0x4000
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK 0x0f00
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT 8
#define MV88E6XXX_PORT_CTL1_FID_11_4_MASK 0x00ff
/* Offset 0x06: Port Based VLAN Map */
@@ -351,6 +354,8 @@ int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype);
int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
bool message_port);
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id);
int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
size_t size);
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 7dc230677b78..767cbdccdb3e 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -65,19 +65,12 @@ static int felix_fdb_del(struct dsa_switch *ds, int port,
return ocelot_fdb_del(ocelot, port, addr, vid);
}
-/* This callback needs to be present */
-static int felix_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- return 0;
-}
-
-static void felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int felix_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_mdb_add(ocelot, port, mdb);
+ return ocelot_port_mdb_add(ocelot, port, mdb);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
@@ -116,8 +109,7 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
- u16 vid, flags = vlan->flags;
- int err;
+ u16 flags = vlan->flags;
/* Ocelot switches copy frames as-is to the CPU, so the flags:
* egress-untagged or not, pvid or not, make no difference. This
@@ -130,61 +122,40 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port,
if (port == ocelot->npi)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_prepare(ocelot, port, vid,
- flags & BRIDGE_VLAN_INFO_PVID,
- flags & BRIDGE_VLAN_INFO_UNTAGGED);
- if (err)
- return err;
- }
-
- return 0;
+ return ocelot_vlan_prepare(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
-static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans)
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_vlan_filtering(ocelot, port, enabled, trans);
+ return ocelot_port_vlan_filtering(ocelot, port, enabled);
}
-static void felix_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int felix_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
u16 flags = vlan->flags;
- u16 vid;
int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_add(ocelot, port, vid,
- flags & BRIDGE_VLAN_INFO_PVID,
- flags & BRIDGE_VLAN_INFO_UNTAGGED);
- if (err) {
- dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
- vid, port, err);
- return;
- }
- }
+ err = felix_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+
+ return ocelot_vlan_add(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
static int felix_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
- u16 vid;
- int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_del(ocelot, port, vid);
- if (err) {
- dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
- vid, port, err);
- return err;
- }
- }
- return 0;
+ return ocelot_vlan_del(ocelot, port, vlan->vid);
}
static int felix_port_enable(struct dsa_switch *ds, int port,
@@ -328,7 +299,7 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
ANA_PORT_QOS_CFG,
port);
- for (i = 0; i < FELIX_NUM_TC * 2; i++) {
+ for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
ocelot_rmw_ix(ocelot,
(ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
@@ -451,12 +422,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->map = felix->info->map;
ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_stats = felix->info->num_stats;
- ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->devlink = felix->ds->devlink;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
GFP_KERNEL);
@@ -618,6 +589,10 @@ static int felix_setup(struct dsa_switch *ds)
felix_port_qos_map_init(ocelot, port);
}
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ return err;
+
/* Include the CPU port module in the forwarding mask for unknown
* unicast - the hardware default value for ANA_FLOODING_FLD_UNICAST
* excludes BIT(ocelot->num_phys_ports), and so does ocelot_init, since
@@ -628,7 +603,7 @@ static int felix_setup(struct dsa_switch *ds)
ANA_PGID_PGID, PGID_UC);
ds->mtu_enforcement_ingress = true;
- ds->configure_vlan_while_not_filtering = true;
+ ds->assisted_learning_on_cpu_port = true;
return 0;
}
@@ -639,14 +614,15 @@ static void felix_teardown(struct dsa_switch *ds)
struct felix *felix = ocelot_to_felix(ocelot);
int port;
- if (felix->info->mdio_bus_free)
- felix->info->mdio_bus_free(ocelot);
+ ocelot_devlink_sb_unregister(ocelot);
+ ocelot_deinit_timestamp(ocelot);
+ ocelot_deinit(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++)
ocelot_deinit_port(ocelot, port);
- ocelot_deinit_timestamp(ocelot);
- /* stop workqueue thread */
- ocelot_deinit(ocelot);
+
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
@@ -780,46 +756,156 @@ static int felix_port_setup_tc(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
}
+static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int felix_sb_occ_snapshot(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int felix_sb_occ_max_clear(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_cur, p_max);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
- .get_tag_protocol = felix_get_tag_protocol,
- .setup = felix_setup,
- .teardown = felix_teardown,
- .set_ageing_time = felix_set_ageing_time,
- .get_strings = felix_get_strings,
- .get_ethtool_stats = felix_get_ethtool_stats,
- .get_sset_count = felix_get_sset_count,
- .get_ts_info = felix_get_ts_info,
- .phylink_validate = felix_phylink_validate,
- .phylink_mac_config = felix_phylink_mac_config,
- .phylink_mac_link_down = felix_phylink_mac_link_down,
- .phylink_mac_link_up = felix_phylink_mac_link_up,
- .port_enable = felix_port_enable,
- .port_disable = felix_port_disable,
- .port_fdb_dump = felix_fdb_dump,
- .port_fdb_add = felix_fdb_add,
- .port_fdb_del = felix_fdb_del,
- .port_mdb_prepare = felix_mdb_prepare,
- .port_mdb_add = felix_mdb_add,
- .port_mdb_del = felix_mdb_del,
- .port_bridge_join = felix_bridge_join,
- .port_bridge_leave = felix_bridge_leave,
- .port_stp_state_set = felix_bridge_stp_state_set,
- .port_vlan_prepare = felix_vlan_prepare,
- .port_vlan_filtering = felix_vlan_filtering,
- .port_vlan_add = felix_vlan_add,
- .port_vlan_del = felix_vlan_del,
- .port_hwtstamp_get = felix_hwtstamp_get,
- .port_hwtstamp_set = felix_hwtstamp_set,
- .port_rxtstamp = felix_rxtstamp,
- .port_txtstamp = felix_txtstamp,
- .port_change_mtu = felix_change_mtu,
- .port_max_mtu = felix_get_max_mtu,
- .port_policer_add = felix_port_policer_add,
- .port_policer_del = felix_port_policer_del,
- .cls_flower_add = felix_cls_flower_add,
- .cls_flower_del = felix_cls_flower_del,
- .cls_flower_stats = felix_cls_flower_stats,
- .port_setup_tc = felix_port_setup_tc,
+ .get_tag_protocol = felix_get_tag_protocol,
+ .setup = felix_setup,
+ .teardown = felix_teardown,
+ .set_ageing_time = felix_set_ageing_time,
+ .get_strings = felix_get_strings,
+ .get_ethtool_stats = felix_get_ethtool_stats,
+ .get_sset_count = felix_get_sset_count,
+ .get_ts_info = felix_get_ts_info,
+ .phylink_validate = felix_phylink_validate,
+ .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_link_down = felix_phylink_mac_link_down,
+ .phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
+ .port_fdb_dump = felix_fdb_dump,
+ .port_fdb_add = felix_fdb_add,
+ .port_fdb_del = felix_fdb_del,
+ .port_mdb_add = felix_mdb_add,
+ .port_mdb_del = felix_mdb_del,
+ .port_bridge_join = felix_bridge_join,
+ .port_bridge_leave = felix_bridge_leave,
+ .port_stp_state_set = felix_bridge_stp_state_set,
+ .port_vlan_filtering = felix_vlan_filtering,
+ .port_vlan_add = felix_vlan_add,
+ .port_vlan_del = felix_vlan_del,
+ .port_hwtstamp_get = felix_hwtstamp_get,
+ .port_hwtstamp_set = felix_hwtstamp_set,
+ .port_rxtstamp = felix_rxtstamp,
+ .port_txtstamp = felix_txtstamp,
+ .port_change_mtu = felix_change_mtu,
+ .port_max_mtu = felix_get_max_mtu,
+ .port_policer_add = felix_port_policer_add,
+ .port_policer_del = felix_port_policer_del,
+ .cls_flower_add = felix_cls_flower_add,
+ .cls_flower_del = felix_cls_flower_del,
+ .cls_flower_stats = felix_cls_flower_stats,
+ .port_setup_tc = felix_port_setup_tc,
+ .devlink_sb_pool_get = felix_sb_pool_get,
+ .devlink_sb_pool_set = felix_sb_pool_set,
+ .devlink_sb_port_pool_get = felix_sb_port_pool_get,
+ .devlink_sb_port_pool_set = felix_sb_port_pool_set,
+ .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
+ .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
+ .devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
+ .devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
+ .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
+ .devlink_sb_occ_tc_port_bind_get = felix_sb_occ_tc_port_bind_get,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 4c717324ac2f..994835cb9307 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -5,7 +5,6 @@
#define _MSCC_FELIX_H
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
-#define FELIX_NUM_TC 8
/* Platform-specific information */
struct felix_info {
@@ -15,7 +14,6 @@ struct felix_info {
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
- int shared_queue_sz;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2e5bbdca5ea4..f9711e69b8d5 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1006,9 +1006,27 @@ static u16 vsc9959_wm_enc(u16 value)
return value;
}
+static u16 vsc9959_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(8, 0));
+
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
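The decoder above is the counterpart of vsc9959_wm_enc, of which only the tail is visible in this hunk. Below is a minimal user-space sketch of the scheme; wm_enc here is an assumed inverse written from the visible decoder, not copied from the driver.

#include <assert.h>
#include <stdint.h>

/* Watermarks below 256 are stored exactly; larger values set bit 8 and
 * are stored in units of 16, trading precision for range.
 */
static uint16_t wm_enc(uint16_t value)	/* assumed inverse of vsc9959_wm_dec */
{
	if (value >= 256)
		return (1u << 8) | (value / 16);
	return value;
}

static uint16_t wm_dec(uint16_t wm)	/* same logic as vsc9959_wm_dec */
{
	if (wm & (1u << 8))
		return (wm & 0xff) * 16;
	return wm;
}

int main(void)
{
	assert(wm_dec(wm_enc(100)) == 100);	/* exact below 256 */
	assert(wm_dec(wm_enc(1000)) == 992);	/* 1000 rounds down to 62 * 16 */
	return 0;
}

The vsc9953 variant further down applies the same idea with bit 9 and a 9-bit mantissa, since that switch has wider watermark fields.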
+
+static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
@@ -1356,10 +1374,9 @@ static const struct felix_info felix_info_vsc9959 = {
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
- .shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
.num_ports = 6,
- .num_tx_queues = FELIX_NUM_TC,
+ .num_tx_queues = OCELOT_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
.ptp_caps = &vsc9959_ptp_caps,
@@ -1408,17 +1425,6 @@ static int felix_pci_probe(struct pci_dev *pdev,
goto err_pci_enable;
}
- /* set up for high or low dma */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
- }
-
felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
if (!felix) {
err = -ENOMEM;
@@ -1429,7 +1435,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = FELIX_NUM_TC;
+ ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev,
felix->info->switch_pci_bar);
@@ -1474,9 +1480,8 @@ err_register_ds:
kfree(ds);
err_alloc_ds:
err_alloc_irq:
-err_alloc_felix:
kfree(felix);
-err_dma:
+err_alloc_felix:
pci_disable_device(pdev);
err_pci_enable:
return err;
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ebbaf6817ec8..5e9bfdea50be 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1057,9 +1057,27 @@ static u16 vsc9953_wm_enc(u16 value)
return value;
}
+static u16 vsc9953_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(9, 0));
+
+ if (wm & BIT(9))
+ return (wm & GENMASK(8, 0)) * 16;
+
+ return wm;
+}
+
+static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(25, 13)) >> 13;
+ *maxuse = val & GENMASK(12, 0);
+}
+
static const struct ocelot_ops vsc9953_ops = {
.reset = vsc9953_reset,
.wm_enc = vsc9953_wm_enc,
+ .wm_dec = vsc9953_wm_dec,
+ .wm_stat = vsc9953_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
@@ -1181,9 +1199,9 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
- .shared_queue_sz = 256 * 1024,
.num_mact_rows = 2048,
.num_ports = 10,
+ .num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 4d49c5f2b790..ca2ad77b71f1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -101,6 +101,9 @@
AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
AR9331_SW_PORT_STATUS_SPEED_M)
+/* MIB registers */
+#define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100))
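With this layout each port owns a 0x100-byte counter block: port 2's counters start at 0x20200, for example, so its rxgoodbyte register (offset 0x3c in the raw stats layout added below) sits at 0x2023c.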
+
/* Phy bypass mode
* ------------------------------------------------------------------------
* Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
@@ -154,6 +157,66 @@
#define AR9331_SW_MDIO_POLL_SLEEP_US 1
#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
+/*
+ * The interval should be small enough to avoid overflow of 32bit MIBs.
+ *
+ * FIXME: until we can read MIBs from the stats64 call directly (i.e. sleep
+ * there), we have to poll the stats more frequently than is actually
+ * needed. Purely for overflow protection, a 100 second interval would
+ * have been enough.
+ */
+#define STATS_INTERVAL_JIFFIES (3 * HZ)
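As a back-of-the-envelope check on the comment above (illustration only): a 32-bit byte counter wraps after 2^32 bytes, which at Fast Ethernet rates takes about 344 seconds.

#include <stdio.h>

int main(void)
{
	double bytes = 4294967296.0;	/* 2^32 */
	double fe = 100e6 / 8;		/* 100 Mbit/s in bytes/s */
	double ge = 1000e6 / 8;		/* 1 Gbit/s in bytes/s */

	printf("100 Mbit/s: %.0f s\n", bytes / fe);	/* ~344 s */
	printf("1 Gbit/s:   %.0f s\n", bytes / ge);	/* ~34 s */
	return 0;
}

So at 100 Mbit/s even a 100 second poll leaves ample margin, as the comment says; the much shorter 3 second period only exists to keep the cached get_stats64 numbers reasonably fresh.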
+
+struct ar9331_sw_stats_raw {
+ u32 rxbroad; /* 0x00 */
+ u32 rxpause; /* 0x04 */
+ u32 rxmulti; /* 0x08 */
+ u32 rxfcserr; /* 0x0c */
+ u32 rxalignerr; /* 0x10 */
+ u32 rxrunt; /* 0x14 */
+ u32 rxfragment; /* 0x18 */
+ u32 rx64byte; /* 0x1c */
+ u32 rx128byte; /* 0x20 */
+ u32 rx256byte; /* 0x24 */
+ u32 rx512byte; /* 0x28 */
+ u32 rx1024byte; /* 0x2c */
+ u32 rx1518byte; /* 0x30 */
+ u32 rxmaxbyte; /* 0x34 */
+ u32 rxtoolong; /* 0x38 */
+ u32 rxgoodbyte; /* 0x3c */
+ u32 rxgoodbyte_hi;
+ u32 rxbadbyte; /* 0x44 */
+ u32 rxbadbyte_hi;
+ u32 rxoverflow; /* 0x4c */
+ u32 filtered; /* 0x50 */
+ u32 txbroad; /* 0x54 */
+ u32 txpause; /* 0x58 */
+ u32 txmulti; /* 0x5c */
+ u32 txunderrun; /* 0x60 */
+ u32 tx64byte; /* 0x64 */
+ u32 tx128byte; /* 0x68 */
+ u32 tx256byte; /* 0x6c */
+ u32 tx512byte; /* 0x70 */
+ u32 tx1024byte; /* 0x74 */
+ u32 tx1518byte; /* 0x78 */
+ u32 txmaxbyte; /* 0x7c */
+ u32 txoversize; /* 0x80 */
+ u32 txbyte; /* 0x84 */
+ u32 txbyte_hi;
+ u32 txcollision; /* 0x8c */
+ u32 txabortcol; /* 0x90 */
+ u32 txmulticol; /* 0x94 */
+ u32 txsinglecol; /* 0x98 */
+ u32 txexcdefer; /* 0x9c */
+ u32 txdefer; /* 0xa0 */
+ u32 txlatecol; /* 0xa4 */
+};
+
+struct ar9331_sw_port {
+ int idx;
+ struct delayed_work mib_read;
+ struct rtnl_link_stats64 stats;
+ spinlock_t stats_lock;
+};
+
struct ar9331_sw_priv {
struct device *dev;
struct dsa_switch ds;
@@ -165,8 +228,17 @@ struct ar9331_sw_priv {
struct mii_bus *sbus; /* mdio slave */
struct regmap *regmap;
struct reset_control *sw_reset;
+ struct ar9331_sw_port port[AR9331_SW_PORTS];
};
+static struct ar9331_sw_priv *ar9331_sw_port_to_priv(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_port *p = port - port->idx;
+
+ return (struct ar9331_sw_priv *)((void *)p -
+ offsetof(struct ar9331_sw_priv, port));
+}
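The open-coded pointer arithmetic above walks back from &priv->port[idx] to the enclosing ar9331_sw_priv. An equivalent formulation (sketch only, not part of the patch) is container_of() on the first array element:

static struct ar9331_sw_priv *
ar9331_sw_port_to_priv_alt(struct ar9331_sw_port *port)
{
	/* Rewind to port[0], then recover the containing structure */
	return container_of(port - port->idx, struct ar9331_sw_priv, port[0]);
}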
+
/* Warning: switch reset will reset last AR9331_SW_MDIO_PHY_MODE_PAGE request
* If some kind of optimization is used, the request should be repeated.
*/
@@ -330,6 +402,8 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
if (ret)
goto error;
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
error:
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
@@ -424,6 +498,7 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
int ret;
@@ -431,6 +506,8 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
AR9331_SW_PORT_STATUS_MAC_MASK, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+
+ cancel_delayed_work_sync(&p->mib_read);
}
static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -441,10 +518,13 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
u32 val;
int ret;
+ schedule_delayed_work(&p->mib_read, 0);
+
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
case SPEED_1000:
@@ -477,6 +557,73 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
+static void ar9331_read_stats(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct rtnl_link_stats64 *stats = &port->stats;
+ struct ar9331_sw_stats_raw raw;
+ int ret;
+
+ /* Do the slowest part first, to avoid needless locking for a long time */
+ ret = regmap_bulk_read(priv->regmap, AR9331_MIB_COUNTER(port->idx),
+ &raw, sizeof(raw) / sizeof(u32));
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return;
+ }
+ /* All MIB counters are cleared automatically on read */
+
+ spin_lock(&port->stats_lock);
+
+ stats->rx_bytes += raw.rxgoodbyte;
+ stats->tx_bytes += raw.txbyte;
+
+ stats->rx_packets += raw.rx64byte + raw.rx128byte + raw.rx256byte +
+ raw.rx512byte + raw.rx1024byte + raw.rx1518byte + raw.rxmaxbyte;
+ stats->tx_packets += raw.tx64byte + raw.tx128byte + raw.tx256byte +
+ raw.tx512byte + raw.tx1024byte + raw.tx1518byte + raw.txmaxbyte;
+
+ stats->rx_length_errors += raw.rxrunt + raw.rxfragment + raw.rxtoolong;
+ stats->rx_crc_errors += raw.rxfcserr;
+ stats->rx_frame_errors += raw.rxalignerr;
+ stats->rx_missed_errors += raw.rxoverflow;
+ stats->rx_dropped += raw.filtered;
+ stats->rx_errors += raw.rxfcserr + raw.rxalignerr + raw.rxrunt +
+ raw.rxfragment + raw.rxoverflow + raw.rxtoolong;
+
+ stats->tx_window_errors += raw.txlatecol;
+ stats->tx_fifo_errors += raw.txunderrun;
+ stats->tx_aborted_errors += raw.txabortcol;
+ stats->tx_errors += raw.txoversize + raw.txabortcol + raw.txunderrun +
+ raw.txlatecol;
+
+ stats->multicast += raw.rxmulti;
+ stats->collisions += raw.txcollision;
+
+ spin_unlock(&port->stats_lock);
+}
+
+static void ar9331_do_stats_poll(struct work_struct *work)
+{
+ struct ar9331_sw_port *port = container_of(work, struct ar9331_sw_port,
+ mib_read.work);
+
+ ar9331_read_stats(port);
+
+ schedule_delayed_work(&port->mib_read, STATS_INTERVAL_JIFFIES);
+}
+
+static void ar9331_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
@@ -485,6 +632,7 @@ static const struct dsa_switch_ops ar9331_sw_ops = {
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+ .get_stats64 = ar9331_get_stats64,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
@@ -796,7 +944,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv;
struct dsa_switch *ds;
- int ret;
+ int ret, i;
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -831,6 +979,14 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
ds->ops = &priv->ops;
dev_set_drvdata(&mdiodev->dev, priv);
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ port->idx = i;
+ spin_lock_init(&port->stats_lock);
+ INIT_DELAYED_WORK(&port->mib_read, ar9331_do_stats_poll);
+ }
+
ret = dsa_register_switch(ds);
if (ret)
goto err_remove_irq;
@@ -846,6 +1002,13 @@ err_remove_irq:
static void ar9331_sw_remove(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ cancel_delayed_work_sync(&port->mib_read);
+ }
irq_domain_remove(priv->irqdomain);
mdiobus_unregister(priv->mbus);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 5bdac669a339..6127823d6c2e 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1294,14 +1294,10 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct qca8k_priv *priv = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (vlan_filtering) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE,
@@ -1316,13 +1312,6 @@ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
}
static int
-qca8k_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- return 0;
-}
-
-static void
qca8k_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -1330,24 +1319,24 @@ qca8k_port_vlan_add(struct dsa_switch *ds, int port,
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct qca8k_priv *priv = ds->priv;
int ret = 0;
- u16 vid;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
- ret = qca8k_vlan_add(priv, port, vid, untagged);
-
- if (ret)
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
if (pvid) {
int shift = 16 * (port % 2);
qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift,
- vlan->vid_end << shift);
+ 0xfff << shift, vlan->vid << shift);
qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid_end) |
- QCA8K_PORT_VLAN_SVID(vlan->vid_end));
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
}
+
+ return 0;
}
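The 16 * (port % 2) shift above reflects that each QCA8K egress-VLAN register packs the 12-bit default-VID fields (mask 0xfff) of two adjacent ports into its low and high half-words.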
static int
@@ -1356,11 +1345,8 @@ qca8k_port_vlan_del(struct dsa_switch *ds, int port,
{
struct qca8k_priv *priv = ds->priv;
int ret = 0;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
- ret = qca8k_vlan_del(priv, port, vid);
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
if (ret)
dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
@@ -1393,7 +1379,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
.port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_prepare = qca8k_port_vlan_prepare,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_validate = qca8k_phylink_validate,
@@ -1446,7 +1431,6 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
- priv->ds->configure_vlan_while_not_filtering = true;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index 6b6a3dec0984..26376b052594 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -131,12 +131,9 @@ int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
int rtl8366_init_vlan(struct realtek_smi *smi);
int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans);
-int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-void rtl8366_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+ bool vlan_filtering);
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 83d481ef9273..3b24f2e13200 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -340,20 +340,15 @@ int rtl8366_init_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct realtek_smi *smi = ds->priv;
struct rtl8366_vlan_4k vlan4k;
int ret;
/* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (switchdev_trans_ph_prepare(trans)) {
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
-
- return 0;
- }
+ if (!smi->ops->is_vlan_valid(smi, port + 1))
+ return -EINVAL;
dev_info(smi->dev, "%s filtering on port %d\n",
vlan_filtering ? "enable" : "disable",
@@ -379,76 +374,56 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct realtek_smi *smi = ds->priv;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
- if (!smi->ops->is_vlan_valid(smi, vid))
- return -EINVAL;
-
- dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
- vlan->vid_begin, vlan->vid_end);
-
- /* Enable VLAN in the hardware
- * FIXME: what's with this 4k business?
- * Just rtl8366_enable_vlan() seems inconclusive.
- */
- return rtl8366_enable_vlan4k(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
-
-void rtl8366_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
struct realtek_smi *smi = ds->priv;
u32 member = 0;
u32 untag = 0;
- u16 vid;
int ret;
- for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
- if (!smi->ops->is_vlan_valid(smi, vid))
- return;
+ if (!smi->ops->is_vlan_valid(smi, vlan->vid))
+ return -EINVAL;
+
+ /* Enable VLAN in the hardware
+ * FIXME: what's with this 4k business?
+ * Just rtl8366_enable_vlan() seems inconclusive.
+ */
+ ret = rtl8366_enable_vlan4k(smi, true);
+ if (ret)
+ return ret;
dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
- vlan->vid_begin,
- port,
- untagged ? "untagged" : "tagged",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID");
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
dev_err(smi->dev, "port is DSA or CPU port\n");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- member |= BIT(port);
-
- if (untagged)
- untag |= BIT(port);
+ member |= BIT(port);
- ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
- if (ret)
- dev_err(smi->dev,
- "failed to set up VLAN %04x",
- vid);
+ if (untagged)
+ untag |= BIT(port);
- if (!pvid)
- continue;
+ ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ if (ret) {
+ dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ return ret;
+ }
- ret = rtl8366_set_pvid(smi, port, vid);
- if (ret)
- dev_err(smi->dev,
- "failed to set PVID on port %d to VLAN %04x",
- port, vid);
+ if (!pvid)
+ return 0;
- if (!ret)
- dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
- vid, port);
+ ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ if (ret) {
+ dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ port, vlan->vid);
+ return ret;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
@@ -456,46 +431,39 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct realtek_smi *smi = ds->priv;
- u16 vid;
- int ret;
-
- dev_info(smi->dev, "del VLAN on port %d\n", port);
+ int ret, i;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- int i;
+ dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
- dev_info(smi->dev, "del VLAN %04x\n", vid);
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- struct rtl8366_vlan_mc vlanmc;
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
+ if (vlan->vid == vlanmc.vid) {
+ /* Remove this port from the VLAN */
+ vlanmc.member &= ~BIT(port);
+ vlanmc.untag &= ~BIT(port);
+ /* If no ports are members of this VLAN
+ * anymore then clear the whole member
+ * config so it can be reused.
+ */
+ if (!vlanmc.member && !vlanmc.untag) {
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.fid = 0;
+ }
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to remove VLAN %04x\n",
+ vlan->vid);
return ret;
-
- if (vid == vlanmc.vid) {
- /* Remove this port from the VLAN */
- vlanmc.member &= ~BIT(port);
- vlanmc.untag &= ~BIT(port);
- /*
- * If no ports are members of this VLAN
- * anymore then clear the whole member
- * config so it can be reused.
- */
- if (!vlanmc.member && vlanmc.untag) {
- vlanmc.vid = 0;
- vlanmc.priority = 0;
- vlanmc.fid = 0;
- }
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret) {
- dev_err(smi->dev,
- "failed to remove VLAN %04x\n",
- vid);
- return ret;
- }
- break;
}
+ break;
}
}
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index cfe56960f44b..a89093bc6c6a 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -601,108 +601,114 @@ static int rtl8366rb_set_addr(struct realtek_smi *smi)
/* Found in a vendor driver */
+/* Struct for handling the jam tables' entries */
+struct rtl8366rb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
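These jam tables were previously flat u16 arrays of interleaved register/value pairs; the struct makes each pair explicit. A hypothetical consumer could walk a table like this (sketch only, assuming the realtek_smi regmap is the right write path; the driver's actual sequence, including any special handling of the 0xBExx PHY-window registers, is outside this hunk):

static int rtl8366rb_write_jam_tbl(struct realtek_smi *smi,
				   const struct rtl8366rb_jam_tbl_entry *jam,
				   size_t len)
{
	size_t i;
	int ret;

	for (i = 0; i < len; i++) {
		/* Each entry is one register write, applied in table order */
		ret = regmap_write(smi->map, jam[i].reg, jam[i].val);
		if (ret)
			return ret;
	}

	return 0;
}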
+
/* For the "version 0" early silicon, appear in most source releases */
-static const u16 rtl8366rb_init_jam_ver_0[] = {
- 0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF,
- 0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF,
- 0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688,
- 0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830,
- 0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8,
- 0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000,
- 0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A,
- 0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A,
- 0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A,
- 0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A,
- 0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A,
- 0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A,
- 0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961,
- 0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146,
- 0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146,
- 0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320,
- 0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146,
- 0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146,
- 0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146,
- 0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294,
- 0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294,
- 0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 0xA23F, 0x026C, 0x0294,
- 0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294,
- 0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448,
- 0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B,
- 0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD,
- 0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C,
- 0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13,
- 0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_0[] = {
+ {0x000B, 0x0001}, {0x03A6, 0x0100}, {0x03A7, 0x0001}, {0x02D1, 0x3FFF},
+ {0x02D2, 0x3FFF}, {0x02D3, 0x3FFF}, {0x02D4, 0x3FFF}, {0x02D5, 0x3FFF},
+ {0x02D6, 0x3FFF}, {0x02D7, 0x3FFF}, {0x02D8, 0x3FFF}, {0x022B, 0x0688},
+ {0x022C, 0x0FAC}, {0x03D0, 0x4688}, {0x03D1, 0x01F5}, {0x0000, 0x0830},
+ {0x02F9, 0x0200}, {0x02F7, 0x7FFF}, {0x02F8, 0x03FF}, {0x0080, 0x03E8},
+ {0x0081, 0x00CE}, {0x0082, 0x00DA}, {0x0083, 0x0230}, {0xBE0F, 0x2000},
+ {0x0231, 0x422A}, {0x0232, 0x422A}, {0x0233, 0x422A}, {0x0234, 0x422A},
+ {0x0235, 0x422A}, {0x0236, 0x422A}, {0x0237, 0x422A}, {0x0238, 0x422A},
+ {0x0239, 0x422A}, {0x023A, 0x422A}, {0x023B, 0x422A}, {0x023C, 0x422A},
+ {0x023D, 0x422A}, {0x023E, 0x422A}, {0x023F, 0x422A}, {0x0240, 0x422A},
+ {0x0241, 0x422A}, {0x0242, 0x422A}, {0x0243, 0x422A}, {0x0244, 0x422A},
+ {0x0245, 0x422A}, {0x0246, 0x422A}, {0x0247, 0x422A}, {0x0248, 0x422A},
+ {0x0249, 0x0146}, {0x024A, 0x0146}, {0x024B, 0x0146}, {0xBE03, 0xC961},
+ {0x024D, 0x0146}, {0x024E, 0x0146}, {0x024F, 0x0146}, {0x0250, 0x0146},
+ {0xBE64, 0x0226}, {0x0252, 0x0146}, {0x0253, 0x0146}, {0x024C, 0x0146},
+ {0x0251, 0x0146}, {0x0254, 0x0146}, {0xBE62, 0x3FD0}, {0x0084, 0x0320},
+ {0x0255, 0x0146}, {0x0256, 0x0146}, {0x0257, 0x0146}, {0x0258, 0x0146},
+ {0x0259, 0x0146}, {0x025A, 0x0146}, {0x025B, 0x0146}, {0x025C, 0x0146},
+ {0x025D, 0x0146}, {0x025E, 0x0146}, {0x025F, 0x0146}, {0x0260, 0x0146},
+ {0x0261, 0xA23F}, {0x0262, 0x0294}, {0x0263, 0xA23F}, {0x0264, 0x0294},
+ {0x0265, 0xA23F}, {0x0266, 0x0294}, {0x0267, 0xA23F}, {0x0268, 0x0294},
+ {0x0269, 0xA23F}, {0x026A, 0x0294}, {0x026B, 0xA23F}, {0x026C, 0x0294},
+ {0x026D, 0xA23F}, {0x026E, 0x0294}, {0x026F, 0xA23F}, {0x0270, 0x0294},
+ {0x02F5, 0x0048}, {0xBE09, 0x0E00}, {0xBE1E, 0x0FA0}, {0xBE14, 0x8448},
+ {0xBE15, 0x1007}, {0xBE4A, 0xA284}, {0xC454, 0x3F0B}, {0xC474, 0x3F0B},
+ {0xBE48, 0x3672}, {0xBE4B, 0x17A7}, {0xBE4C, 0x0B15}, {0xBE52, 0x0EDD},
+ {0xBE49, 0x8C00}, {0xBE5B, 0x785C}, {0xBE5C, 0x785C}, {0xBE5D, 0x785C},
+ {0xBE61, 0x368A}, {0xBE63, 0x9B84}, {0xC456, 0xCC13}, {0xC476, 0xCC13},
+ {0xBE65, 0x307D}, {0xBE6D, 0x0005}, {0xBE6E, 0xE120}, {0xBE2E, 0x7BAF},
};
/* This v1 init sequence is from Belkin F5D8235 U-Boot release */
-static const u16 rtl8366rb_init_jam_ver_1[] = {
- 0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C,
- 0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
- 0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585,
- 0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135,
- 0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F,
- 0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20,
- 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000,
- 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140,
- 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A,
- 0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0,
- 0xBE79, 0x3C3C, 0xBE00, 0x1340,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_1[] = {
+ {0x0000, 0x0830}, {0x0001, 0x8000}, {0x0400, 0x8130}, {0xBE78, 0x3C3C},
+ {0x0431, 0x5432}, {0xBE37, 0x0CE4}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
+ {0xC44C, 0x1585}, {0xC44C, 0x1185}, {0xC44C, 0x1585}, {0xC46C, 0x1585},
+ {0xC46C, 0x1185}, {0xC46C, 0x1585}, {0xC451, 0x2135}, {0xC471, 0x2135},
+ {0xBE10, 0x8140}, {0xBE15, 0x0007}, {0xBE6E, 0xE120}, {0xBE69, 0xD20F},
+ {0xBE6B, 0x0320}, {0xBE24, 0xB000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF20},
+ {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800}, {0xBE24, 0x0000},
+ {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60}, {0xBE21, 0x0140},
+ {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000}, {0xBE2E, 0x7B7A},
+ {0xBE36, 0x0CE4}, {0x02F5, 0x0048}, {0xBE77, 0x2940}, {0x000A, 0x83E0},
+ {0xBE79, 0x3C3C}, {0xBE00, 0x1340},
};
/* This v2 init sequence is from Belkin F5D8235 U-Boot release */
-static const u16 rtl8366rb_init_jam_ver_2[] = {
- 0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
- 0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14,
- 0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85,
- 0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881,
- 0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885,
- 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
- 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320,
- 0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
- 0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007,
- 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140,
- 0xBE00, 0x1340, 0x0F51, 0x0010,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_2[] = {
+ {0x0450, 0x0000}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
+ {0xC44F, 0x6250}, {0xC46F, 0x6250}, {0xC456, 0x0C14}, {0xC476, 0x0C14},
+ {0xC44C, 0x1C85}, {0xC44C, 0x1885}, {0xC44C, 0x1C85}, {0xC46C, 0x1C85},
+ {0xC46C, 0x1885}, {0xC46C, 0x1C85}, {0xC44C, 0x0885}, {0xC44C, 0x0881},
+ {0xC44C, 0x0885}, {0xC46C, 0x0885}, {0xC46C, 0x0881}, {0xC46C, 0x0885},
+ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
+ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6E, 0x0320},
+ {0xBE77, 0x2940}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
+ {0x8000, 0x0001}, {0xBE15, 0x1007}, {0x8000, 0x0000}, {0xBE15, 0x1007},
+ {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160}, {0xBE10, 0x8140},
+ {0xBE00, 0x1340}, {0x0F51, 0x0010},
};
/* Appears in a DDWRT code dump */
-static const u16 rtl8366rb_init_jam_ver_3[] = {
- 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
- 0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
- 0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B,
- 0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B,
- 0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185,
- 0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585,
- 0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185,
- 0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51,
- 0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800,
- 0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60,
- 0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000,
- 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
- 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320,
- 0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
- 0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140,
- 0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160,
- 0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_3[] = {
+ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
+ {0x0F51, 0x0017}, {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
+ {0xC456, 0x0C14}, {0xC476, 0x0C14}, {0xC454, 0x3F8B}, {0xC474, 0x3F8B},
+ {0xC450, 0x2071}, {0xC470, 0x2071}, {0xC451, 0x226B}, {0xC471, 0x226B},
+ {0xC452, 0xA293}, {0xC472, 0xA293}, {0xC44C, 0x1585}, {0xC44C, 0x1185},
+ {0xC44C, 0x1585}, {0xC46C, 0x1585}, {0xC46C, 0x1185}, {0xC46C, 0x1585},
+ {0xC44C, 0x0185}, {0xC44C, 0x0181}, {0xC44C, 0x0185}, {0xC46C, 0x0185},
+ {0xC46C, 0x0181}, {0xC46C, 0x0185}, {0xBE24, 0xB000}, {0xBE23, 0xFF51},
+ {0xBE22, 0xDF20}, {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800},
+ {0xBE24, 0x0000}, {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60},
+ {0xBE21, 0x0140}, {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000},
+ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
+ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6B, 0x0320},
+ {0xBE77, 0x2800}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
+ {0x8000, 0x0001}, {0xBE10, 0x8140}, {0x8000, 0x0000}, {0xBE10, 0x8140},
+ {0xBE15, 0x1007}, {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160},
+ {0xBE10, 0x8140}, {0xBE00, 0x1340}, {0x0450, 0x0000}, {0x0401, 0x0000},
};
/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
-static const u16 rtl8366rb_init_jam_f5d8235[] = {
- 0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF,
- 0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 0x0256, 0x0A3F,
- 0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F,
- 0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F,
- 0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3,
- 0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304,
- 0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500,
- 0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00,
- 0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_f5d8235[] = {
+ {0x0242, 0x02BF}, {0x0245, 0x02BF}, {0x0248, 0x02BF}, {0x024B, 0x02BF},
+ {0x024E, 0x02BF}, {0x0251, 0x02BF}, {0x0254, 0x0A3F}, {0x0256, 0x0A3F},
+ {0x0258, 0x0A3F}, {0x025A, 0x0A3F}, {0x025C, 0x0A3F}, {0x025E, 0x0A3F},
+ {0x0263, 0x007C}, {0x0100, 0x0004}, {0xBE5B, 0x3500}, {0x800E, 0x200F},
+ {0xBE1D, 0x0F00}, {0x8001, 0x5011}, {0x800A, 0xA2F4}, {0x800B, 0x17A3},
+ {0xBE4B, 0x17A3}, {0xBE41, 0x5011}, {0xBE17, 0x2100}, {0x8000, 0x8304},
+ {0xBE40, 0x8304}, {0xBE4A, 0xA2F4}, {0x800C, 0xA8D5}, {0x8014, 0x5500},
+ {0x8015, 0x0004}, {0xBE4C, 0xA8D5}, {0xBE59, 0x0008}, {0xBE09, 0x0E00},
+ {0xBE36, 0x1036}, {0xBE37, 0x1036}, {0x800D, 0x00FF}, {0xBE4D, 0x00FF},
};
/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
-static const u16 rtl8366rb_init_jam_dgn3500[] = {
- 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017,
- 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000,
- 0x0401, 0x0000, 0x0431, 0x0960,
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_dgn3500[] = {
+ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0F51, 0x0017},
+ {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0}, {0x0450, 0x0000},
+ {0x0401, 0x0000}, {0x0431, 0x0960},
};
@@ -710,16 +716,53 @@ static const u16 rtl8366rb_init_jam_dgn3500[] = {
/* This jam table activates "green ethernet", which means low power mode
 * and is claimed to detect the cable length and not use more power than
 * necessary, and the ports should enter power saving mode 10 seconds after
 * a cable is disconnected. Seems to always be the same.
 */
-static const u16 rtl8366rb_green_jam[][2] = {
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
{0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
{0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
{0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
};
+/* Jam a table of register/value pairs into the proper registers */
+static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
+ int jam_size, struct realtek_smi *smi,
+ bool write_dbg)
+{
+ u32 val;
+ int ret;
+ int i;
+
+ for (i = 0; i < jam_size; i++) {
+ if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_PHY_ACCESS_BUSY_REG,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+ }
+ }
+ if (write_dbg)
+ dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ jam_table[i].val,
+ jam_table[i].reg);
+ ret = regmap_write(smi->map,
+ jam_table[i].reg,
+ jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_smi *smi = ds->priv;
- const u16 *jam_table;
+ const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
u32 chip_id = 0;
@@ -788,54 +831,16 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
- i = 0;
- while (i < jam_size) {
- if ((jam_table[i] & 0xBE00) == 0xBE00) {
- ret = regmap_read(smi->map,
- RTL8366RB_PHY_ACCESS_BUSY_REG,
- &val);
- if (ret)
- return ret;
- if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
- if (ret)
- return ret;
- }
- }
- dev_dbg(smi->dev, "jam %04x into register %04x\n",
- jam_table[i + 1],
- jam_table[i]);
- ret = regmap_write(smi->map,
- jam_table[i],
- jam_table[i + 1]);
- if (ret)
- return ret;
- i += 2;
- }
+ ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
+ if (ret)
+ return ret;
/* Set up the "green ethernet" feature */
- i = 0;
- while (i < ARRAY_SIZE(rtl8366rb_green_jam)) {
- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG,
- &val);
- if (ret)
- return ret;
- if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
- if (ret)
- return ret;
- ret = regmap_write(smi->map,
- rtl8366rb_green_jam[i][0],
- rtl8366rb_green_jam[i][1]);
- if (ret)
- return ret;
- i++;
- }
- }
+ ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
+ ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
+ if (ret)
+ return ret;
+
ret = regmap_write(smi->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
@@ -972,6 +977,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1504,7 +1511,6 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
.port_vlan_filtering = rtl8366_vlan_filtering,
- .port_vlan_prepare = rtl8366_vlan_prepare,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
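For reference, a minimal sketch of how the new helper above is meant to be
called. rtl8366rb_jam_table() and the entry struct are defined in the hunks
above; the table contents here are illustrative only, real values are board
specific:

	static const struct rtl8366rb_jam_tbl_entry example_jam[] = {
		{0xBE78, 0x3C3C}, {0xBE00, 0x1340},
	};

	/* From a context holding a valid struct realtek_smi *smi: */
	ret = rtl8366rb_jam_table(example_jam, ARRAY_SIZE(example_jam),
				  smi, true);
	if (ret)
		return ret;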
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 4ebc4a5a7b35..d582308c2401 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -245,8 +245,7 @@ enum sja1105_reset_reason {
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason);
-int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans);
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled);
void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
/* From sja1105_devlink.c */
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 4a2ec395bcb0..b4bf1b10e66c 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -135,7 +135,6 @@ static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
rtnl_lock();
for (port = 0; port < ds->num_ports; port++) {
- struct switchdev_trans trans;
struct dsa_port *dp;
if (!dsa_is_user_port(ds, port))
@@ -144,13 +143,7 @@ static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
dp = dsa_to_port(ds, port);
vlan_filtering = dsa_port_is_vlan_filtering(dp);
- trans.ph_prepare = true;
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
- if (rc)
- break;
-
- trans.ph_prepare = false;
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering);
if (rc)
break;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 4ca029650993..282253543f3b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -317,7 +317,7 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ table->entries = kzalloc(table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
@@ -1524,17 +1524,10 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
-/* This callback needs to be present */
-static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- return 0;
-}
-
-static void sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
- sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
@@ -2607,35 +2600,11 @@ out:
return rc;
}
-static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct sja1105_private *priv = ds->priv;
- u16 vid;
-
- if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- return 0;
-
- /* If the user wants best-effort VLAN filtering (aka vlan_filtering
- * bridge plus tagging), be sure to at least deny alterations to the
- * configuration done by dsa_8021q.
- */
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (vid_is_dsa_8021q(vid)) {
- dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
-int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans)
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
@@ -2647,16 +2616,12 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
u16 tpid, tpid2;
int rc;
- if (switchdev_trans_ph_prepare(trans)) {
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- if (rule->type == SJA1105_RULE_VL) {
- dev_err(ds->dev,
- "Cannot change VLAN filtering with active VL rules\n");
- return -EBUSY;
- }
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ dev_err(ds->dev,
+ "Cannot change VLAN filtering with active VL rules\n");
+ return -EBUSY;
}
-
- return 0;
}
if (enabled) {
@@ -2794,29 +2759,34 @@ static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
return 0;
}
-static void sja1105_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int sja1105_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
- u16 vid;
int rc;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
- &priv->bridge_vlans);
- if (rc < 0)
- return;
- if (rc > 0)
- vlan_table_changed = true;
+ /* If the user wants best-effort VLAN filtering (aka vlan_filtering
+ * bridge plus tagging), be sure to at least deny alterations to the
+ * configuration done by dsa_8021q.
+ */
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
+ vid_is_dsa_8021q(vlan->vid)) {
+ dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
+ return -EBUSY;
}
+ rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
+ &priv->bridge_vlans);
+ if (rc < 0)
+ return rc;
+ if (rc > 0)
+ vlan_table_changed = true;
+
if (!vlan_table_changed)
- return;
+ return 0;
- rc = sja1105_build_vlan_table(priv, true);
- if (rc)
- dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
+ return sja1105_build_vlan_table(priv, true);
}
static int sja1105_vlan_del(struct dsa_switch *ds, int port,
@@ -2824,14 +2794,11 @@ static int sja1105_vlan_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
- u16 vid;
int rc;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
- if (rc > 0)
- vlan_table_changed = true;
- }
+ rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
+ if (rc > 0)
+ vlan_table_changed = true;
if (!vlan_table_changed)
return 0;
@@ -2934,8 +2901,6 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
- ds->configure_vlan_while_not_filtering = true;
-
rc = sja1105_devlink_setup(ds);
if (rc < 0)
return rc;
@@ -3298,11 +3263,9 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_bridge_join = sja1105_bridge_join,
.port_bridge_leave = sja1105_bridge_leave,
.port_stp_state_set = sja1105_bridge_stp_state_set,
- .port_vlan_prepare = sja1105_vlan_prepare,
.port_vlan_filtering = sja1105_vlan_filtering,
.port_vlan_add = sja1105_vlan_add,
.port_vlan_del = sja1105_vlan_del,
- .port_mdb_prepare = sja1105_mdb_prepare,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_hwtstamp_get = sja1105_hwtstamp_get,
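With the switchdev transactional (prepare/commit) phases gone, a DSA
.port_vlan_add callback now runs in a single phase, takes one VID per call
rather than a vid_begin..vid_end range, and reports errors through its int
return value, exactly as sja1105_vlan_add does above. A minimal sketch of
the new shape (the example_* names are hypothetical):

	static int example_vlan_add(struct dsa_switch *ds, int port,
				    const struct switchdev_obj_port_vlan *vlan)
	{
		/* Validate first; returning an error here replaces the
		 * old .port_vlan_prepare step.
		 */
		if (!example_vid_is_valid(vlan->vid))
			return -EINVAL;

		return example_program_vlan(ds->priv, port, vlan->vid,
					    vlan->flags);
	}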
diff --git a/drivers/net/dsa/xrs700x/Kconfig b/drivers/net/dsa/xrs700x/Kconfig
new file mode 100644
index 000000000000..d10a4dce1676
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_XRS700X
+ tristate
+ depends on NET_DSA
+ select NET_DSA_TAG_XRS700X
+ select REGMAP
+ help
+ This enables support for Arrow SpeedChips XRS7003/7004 gigabit
+ Ethernet switches.
+
+config NET_DSA_XRS700X_I2C
+ tristate "Arrow XRS7000X series switch in I2C mode"
+ depends on NET_DSA && I2C
+ select NET_DSA_XRS700X
+ select REGMAP_I2C
+ help
+ Enable I2C support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
+
+config NET_DSA_XRS700X_MDIO
+ tristate "Arrow XRS7000X series switch in MDIO mode"
+ depends on NET_DSA
+ select NET_DSA_XRS700X
+ help
+ Enable MDIO support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
diff --git a/drivers/net/dsa/xrs700x/Makefile b/drivers/net/dsa/xrs700x/Makefile
new file mode 100644
index 000000000000..51a3a7d9296a
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_XRS700X) += xrs700x.o
+obj-$(CONFIG_NET_DSA_XRS700X_I2C) += xrs700x_i2c.o
+obj-$(CONFIG_NET_DSA_XRS700X_MDIO) += xrs700x_mdio.o
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
new file mode 100644
index 000000000000..259f5e657c46
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+#include <linux/of_device.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
+
+#define XRS7003E_ID 0x100
+#define XRS7003F_ID 0x101
+#define XRS7004E_ID 0x200
+#define XRS7004F_ID 0x201
+
+const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
+EXPORT_SYMBOL(xrs7003e_info);
+
+const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
+EXPORT_SYMBOL(xrs7003f_info);
+
+const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
+EXPORT_SYMBOL(xrs7004e_info);
+
+const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
+EXPORT_SYMBOL(xrs7004f_info);
+
+struct xrs700x_regfield {
+ struct reg_field rf;
+ struct regmap_field **rmf;
+};
+
+struct xrs700x_mib {
+ unsigned int offset;
+ const char *name;
+ int stats64_offset;
+};
+
+#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
+#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}
+
+static const struct xrs700x_mib xrs700x_mibs[] = {
+ XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
+ XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
+ XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
+ XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
+ XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
+ XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
+ XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
+ XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
+ XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
+ XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
+ XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
+ XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
+ XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
+ XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
+};
+
+static void xrs700x_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(xrs700x_mibs);
+}
+
+static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+ struct rtnl_link_stats64 stats;
+ int i;
+
+ memset(&stats, 0, sizeof(stats));
+
+ mutex_lock(&p->mib_mutex);
+
+ /* Capture counter values */
+ regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ unsigned int high = 0, low = 0, reg;
+
+ reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
+ regmap_read(priv->regmap, reg, &low);
+ regmap_read(priv->regmap, reg + 2, &high);
+
+ p->mib_data[i] += (high << 16) | low;
+
+ if (xrs700x_mibs[i].stats64_offset >= 0) {
+ u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
+ *(u64 *)s += p->mib_data[i];
+ }
+ }
+
+ /* multicast must be added to rx_packets (which already includes
+ * unicast and broadcast)
+ */
+ stats.rx_packets += stats.multicast;
+
+ u64_stats_update_begin(&p->syncp);
+ p->stats64 = stats;
+ u64_stats_update_end(&p->syncp);
+
+ mutex_unlock(&p->mib_mutex);
+}
+
+static void xrs700x_mib_work(struct work_struct *work)
+{
+ struct xrs700x *priv = container_of(work, struct xrs700x,
+ mib_work.work);
+ int i;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ xrs700x_read_port_counters(priv, i);
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+}
+
+static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
+ u64 *data)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+
+ xrs700x_read_port_counters(priv, port);
+
+ mutex_lock(&p->mib_mutex);
+ memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
+ mutex_unlock(&p->mib_mutex);
+}
+
+static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ *s = p->stats64;
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+}
+
+static int xrs700x_setup_regmap_range(struct xrs700x *priv)
+{
+ struct xrs700x_regfield regfields[] = {
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_forward
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_management
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_sel_speed
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_cur_speed
+ }
+ };
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(regfields); i++) {
+ *regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
+ priv->regmap,
+ regfields[i].rf);
+ if (IS_ERR(*regfields[i].rmf))
+ return PTR_ERR(*regfields[i].rmf);
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_XRS700X;
+}
+
+static int xrs700x_reset(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
+ val, !(val & XRS_GENERAL_RESET),
+ 10, 1000);
+error:
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int bpdus = 1;
+ unsigned int val;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ bpdus = 0;
+ fallthrough;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = XRS_PORT_DISABLED;
+ break;
+ case BR_STATE_LEARNING:
+ val = XRS_PORT_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = XRS_PORT_FORWARDING;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ regmap_fields_write(priv->ps_forward, port, val);
+
+ /* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
+ * which allows BPDU forwarding to the CPU port when the front facing
+ * port is in disabled/learning state.
+ */
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);
+
+ dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
+ __func__, port, state, val);
+}
+
+/* Add an inbound policy filter which matches the BPDU destination MAC
+ * and forwards to the CPU port. Leave the policy disabled, it will be
+ * enabled as needed.
+ */
+static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int i = 0;
+ int ret;
+
+ /* Compare all 48 bits of the destination MAC address. */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
+ if (ret)
+ return ret;
+
+ /* match BPDU destination 01:80:c2:00:00:00 */
+ for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
+ eth_stp_addr[i] |
+ (eth_stp_addr[i + 1] << 8));
+ if (ret)
+ return ret;
+ }
+
+ /* Mirror BPDU to CPU port */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xrs700x_port_setup(struct dsa_switch *ds, int port)
+{
+ bool cpu_port = dsa_is_cpu_port(ds, port);
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int ret, i;
+
+ xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);
+
+ /* Disable forwarding to non-CPU ports */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+ if (ret)
+ return ret;
+
+ val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
+ ret = regmap_fields_write(priv->ps_management, port, val);
+ if (ret)
+ return ret;
+
+ if (!cpu_port) {
+ ret = xrs700x_port_add_bpdu_ipf(ds, port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_setup(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ int ret, i;
+
+ ret = xrs700x_reset(ds);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ ret = xrs700x_port_setup(ds, i);
+ if (ret)
+ return ret;
+ }
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+
+ return 0;
+}
+
+static void xrs700x_teardown(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+
+ cancel_delayed_work_sync(&priv->mib_work);
+}
+
+static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0:
+ break;
+ case 1:
+ case 2:
+ case 3:
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ /* The switch only supports full duplex. */
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+
+ switch (speed) {
+ case SPEED_1000:
+ val = XRS_PORT_SPEED_1000;
+ break;
+ case SPEED_100:
+ val = XRS_PORT_SPEED_100;
+ break;
+ case SPEED_10:
+ val = XRS_PORT_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ regmap_fields_write(priv->ps_sel_speed, port, val);
+
+ dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
+ __func__, port, mode, speed);
+}
+
+static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
+ struct net_device *bridge, bool join)
+{
+ unsigned int i, cpu_mask = 0, mask = 0;
+ struct xrs700x *priv = ds->priv;
+ int ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+
+ cpu_mask |= BIT(i);
+
+ if (dsa_to_port(ds, i)->bridge_dev == bridge)
+ continue;
+
+ mask |= BIT(i);
+ }
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
+ if (ret)
+ return ret;
+ }
+
+ if (!join) {
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
+ cpu_mask);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ return xrs700x_bridge_common(ds, port, bridge, true);
+}
+
+static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ xrs700x_bridge_common(ds, port, bridge, false);
+}
+
+static const struct dsa_switch_ops xrs700x_ops = {
+ .get_tag_protocol = xrs700x_get_tag_protocol,
+ .setup = xrs700x_setup,
+ .teardown = xrs700x_teardown,
+ .port_stp_state_set = xrs700x_port_stp_state_set,
+ .phylink_validate = xrs700x_phylink_validate,
+ .phylink_mac_link_up = xrs700x_mac_link_up,
+ .get_strings = xrs700x_get_strings,
+ .get_sset_count = xrs700x_get_sset_count,
+ .get_ethtool_stats = xrs700x_get_ethtool_stats,
+ .get_stats64 = xrs700x_get_stats64,
+ .port_bridge_join = xrs700x_bridge_join,
+ .port_bridge_leave = xrs700x_bridge_leave,
+};
+
+static int xrs700x_detect(struct xrs700x *priv)
+{
+ const struct xrs700x_info *info;
+ unsigned int id;
+ int ret;
+
+ ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
+ if (ret) {
+ dev_err(priv->dev, "error %d while reading switch id.\n",
+ ret);
+ return ret;
+ }
+
+ info = of_device_get_match_data(priv->dev);
+ if (!info)
+ return -EINVAL;
+
+ if (info->id == id) {
+ priv->ds->num_ports = info->num_ports;
+ dev_info(priv->dev, "%s detected.\n", info->name);
+ return 0;
+ }
+
+ dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
+ info->id, id);
+
+ return -ENODEV;
+}
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
+{
+ struct dsa_switch *ds;
+ struct xrs700x *priv;
+
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ ds->dev = base;
+
+ priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
+
+ ds->ops = &xrs700x_ops;
+ ds->priv = priv;
+ priv->dev = base;
+
+ priv->ds = ds;
+ priv->priv = devpriv;
+
+ return priv;
+}
+EXPORT_SYMBOL(xrs700x_switch_alloc);
+
+static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+
+ p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
+ sizeof(*p->mib_data), GFP_KERNEL);
+ if (!p->mib_data)
+ return -ENOMEM;
+
+ mutex_init(&p->mib_mutex);
+ u64_stats_init(&p->syncp);
+
+ return 0;
+}
+
+int xrs700x_switch_register(struct xrs700x *priv)
+{
+ int ret;
+ int i;
+
+ ret = xrs700x_detect(priv);
+ if (ret)
+ return ret;
+
+ ret = xrs700x_setup_regmap_range(priv);
+ if (ret)
+ return ret;
+
+ priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
+ sizeof(*priv->ports), GFP_KERNEL);
+ if (!priv->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ ret = xrs700x_alloc_port_mib(priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return dsa_register_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_register);
+
+void xrs700x_switch_remove(struct xrs700x *priv)
+{
+ dsa_unregister_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_remove);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
+MODULE_LICENSE("GPL v2");
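The XRS700X_MIB() macro above records, for each hardware counter, the byte
offset of the matching struct rtnl_link_stats64 member, so one loop in
xrs700x_read_port_counters() can feed both the ethtool stats and stats64.
A short sketch of that offsetof technique in isolation (values hypothetical):

	struct rtnl_link_stats64 stats = {};
	u64 count = 1234;	/* accumulated MIB value */
	int off = offsetof(struct rtnl_link_stats64, rx_bytes);

	/* Equivalent to stats.rx_bytes += count */
	*(u64 *)((u8 *)&stats + off) += count;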
diff --git a/drivers/net/dsa/xrs700x/xrs700x.h b/drivers/net/dsa/xrs700x/xrs700x.h
new file mode 100644
index 000000000000..ff62cf61b091
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+#include <linux/u64_stats_sync.h>
+#include <uapi/linux/if_link.h>
+
+struct xrs700x_info {
+ unsigned int id;
+ const char *name;
+ size_t num_ports;
+};
+
+extern const struct xrs700x_info xrs7003e_info;
+extern const struct xrs700x_info xrs7003f_info;
+extern const struct xrs700x_info xrs7004e_info;
+extern const struct xrs700x_info xrs7004f_info;
+
+struct xrs700x_port {
+ struct mutex mib_mutex; /* protects mib_data */
+ u64 *mib_data;
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync syncp;
+};
+
+struct xrs700x {
+ struct dsa_switch *ds;
+ struct device *dev;
+ void *priv;
+ struct regmap *regmap;
+ struct regmap_field *ps_forward;
+ struct regmap_field *ps_management;
+ struct regmap_field *ps_sel_speed;
+ struct regmap_field *ps_cur_speed;
+ struct delayed_work mib_work;
+ struct xrs700x_port *ports;
+};
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
+int xrs700x_switch_register(struct xrs700x *priv);
+void xrs700x_switch_remove(struct xrs700x *priv);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
new file mode 100644
index 000000000000..a5f8883af829
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+static int xrs700x_i2c_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ unsigned char buf[4];
+ int ret;
+
+ buf[0] = reg >> 23 & 0xff;
+ buf[1] = reg >> 15 & 0xff;
+ buf[2] = reg >> 7 & 0xff;
+ buf[3] = (reg & 0x7f) << 1;
+
+ ret = i2c_master_send(i2c, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(i2c, buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_recv returned %d\n", ret);
+ return ret;
+ }
+
+ *val = buf[0] << 8 | buf[1];
+
+ return 0;
+}
+
+static int xrs700x_i2c_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ unsigned char buf[6];
+ int ret;
+
+ buf[0] = reg >> 23 & 0xff;
+ buf[1] = reg >> 15 & 0xff;
+ buf[2] = reg >> 7 & 0xff;
+ buf[3] = (reg & 0x7f) << 1 | 1;
+ buf[4] = val >> 8 & 0xff;
+ buf[5] = val & 0xff;
+
+ ret = i2c_master_send(i2c, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_i2c_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_i2c_reg_read,
+ .reg_write = xrs700x_i2c_reg_write,
+ .max_register = 0,
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&i2c->dev, i2c);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&i2c->dev, NULL, &i2c->dev,
+ &xrs700x_i2c_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&i2c->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, priv);
+
+ ret = xrs700x_switch_register(priv);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xrs700x_i2c_remove(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ xrs700x_switch_remove(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id xrs700x_i2c_id[] = {
+ { "xrs700x-switch", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
+
+static const struct of_device_id xrs700x_i2c_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_i2c_dt_ids);
+
+static struct i2c_driver xrs700x_i2c_driver = {
+ .driver = {
+ .name = "xrs700x-i2c",
+ .of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
+ },
+ .probe = xrs700x_i2c_probe,
+ .remove = xrs700x_i2c_remove,
+ .id_table = xrs700x_i2c_id,
+};
+
+module_i2c_driver(xrs700x_i2c_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA I2C driver");
+MODULE_LICENSE("GPL v2");
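Restating the command encoding used by the two I2C accessors above: the
register address is split across four bytes carrying bits 30:23, 22:15,
14:7 and 6:0 of the address, with bit 0 of the last byte acting as the
write flag. This reading is inferred from the code, not from a datasheet:

	static void example_pack_addr(unsigned int reg, bool write,
				      unsigned char buf[4])
	{
		buf[0] = reg >> 23 & 0xff;
		buf[1] = reg >> 15 & 0xff;
		buf[2] = reg >> 7 & 0xff;
		buf[3] = (reg & 0x7f) << 1 | write;	/* bit 0: 1 = write */
	}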
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
new file mode 100644
index 000000000000..a10ee28eb86e
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+#define XRS_MDIO_IBA0 0x10
+#define XRS_MDIO_IBA1 0x11
+#define XRS_MDIO_IBD 0x14
+
+#define XRS_IB_READ 0x0
+#define XRS_IB_WRITE 0x1
+
+static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
+ return ret;
+ }
+
+ *val = (unsigned int)ret;
+
+ return 0;
+}
+
+static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_mdio_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_mdio_reg_read,
+ .reg_write = xrs700x_mdio_reg_write,
+ .max_register = XRS_VLAN(VLAN_N_VID - 1),
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&mdiodev->dev, mdiodev);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, mdiodev,
+ &xrs700x_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ ret = xrs700x_switch_register(priv);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ xrs700x_switch_remove(priv);
+}
+
+static const struct of_device_id xrs700x_mdio_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_mdio_dt_ids);
+
+static struct mdio_driver xrs700x_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "xrs700x-mdio",
+ .of_match_table = xrs700x_mdio_dt_ids,
+ },
+ .probe = xrs700x_mdio_probe,
+ .remove = xrs700x_mdio_remove,
+};
+
+mdio_module_driver(xrs700x_mdio_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA MDIO driver");
+MODULE_LICENSE("GPL v2");
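The MDIO accessors above implement an indirect scheme: IBA1 takes the upper
half of the 32-bit register address, IBA0 takes the lower half with the
read/write command in bit 0, and IBD carries the 16-bit data. Condensed,
the read path is (error handling omitted):

	mdiobus_write(bus, addr, XRS_MDIO_IBA1, reg >> 16);
	mdiobus_write(bus, addr, XRS_MDIO_IBA0,
		      (reg & GENMASK(15, 1)) | XRS_IB_READ);
	val = mdiobus_read(bus, addr, XRS_MDIO_IBD);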
diff --git a/drivers/net/dsa/xrs700x/xrs700x_reg.h b/drivers/net/dsa/xrs700x/xrs700x_reg.h
new file mode 100644
index 000000000000..a135d4d92b6d
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_reg.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Register Base Addresses */
+#define XRS_DEVICE_ID_BASE 0x0
+#define XRS_GPIO_BASE 0x10000
+#define XRS_PORT_OFFSET 0x10000
+#define XRS_PORT_BASE(x) (0x200000 + XRS_PORT_OFFSET * (x))
+#define XRS_RTC_BASE 0x280000
+#define XRS_TS_OFFSET 0x8000
+#define XRS_TS_BASE(x) (0x290000 + XRS_TS_OFFSET * (x))
+#define XRS_SWITCH_CONF_BASE 0x300000
+
+/* Device Identification Registers */
+#define XRS_DEV_ID0 (XRS_DEVICE_ID_BASE + 0)
+#define XRS_DEV_ID1 (XRS_DEVICE_ID_BASE + 2)
+#define XRS_INT_ID0 (XRS_DEVICE_ID_BASE + 4)
+#define XRS_INT_ID1 (XRS_DEVICE_ID_BASE + 6)
+#define XRS_REV_ID (XRS_DEVICE_ID_BASE + 8)
+
+/* GPIO Registers */
+#define XRS_CONFIG0 (XRS_GPIO_BASE + 0x1000)
+#define XRS_INPUT_STATUS0 (XRS_GPIO_BASE + 0x1002)
+#define XRS_CONFIG1 (XRS_GPIO_BASE + 0x1004)
+#define XRS_INPUT_STATUS1 (XRS_GPIO_BASE + 0x1006)
+#define XRS_CONFIG2 (XRS_GPIO_BASE + 0x1008)
+#define XRS_INPUT_STATUS2 (XRS_GPIO_BASE + 0x100a)
+
+/* Port Configuration Registers */
+#define XRS_PORT_GEN_BASE(x) (XRS_PORT_BASE(x) + 0x0)
+#define XRS_PORT_HSR_BASE(x) (XRS_PORT_BASE(x) + 0x2000)
+#define XRS_PORT_PTP_BASE(x) (XRS_PORT_BASE(x) + 0x4000)
+#define XRS_PORT_CNT_BASE(x) (XRS_PORT_BASE(x) + 0x6000)
+#define XRS_PORT_IPO_BASE(x) (XRS_PORT_BASE(x) + 0x8000)
+
+/* Port Configuration Registers - General and State */
+#define XRS_PORT_STATE(x) (XRS_PORT_GEN_BASE(x) + 0x0)
+#define XRS_PORT_FORWARDING 0
+#define XRS_PORT_LEARNING 1
+#define XRS_PORT_DISABLED 2
+#define XRS_PORT_MODE_NORMAL 0
+#define XRS_PORT_MODE_MANAGEMENT 1
+#define XRS_PORT_SPEED_1000 0x12
+#define XRS_PORT_SPEED_100 0x20
+#define XRS_PORT_SPEED_10 0x30
+#define XRS_PORT_VLAN(x) (XRS_PORT_GEN_BASE(x) + 0x10)
+#define XRS_PORT_VLAN0_MAPPING(x) (XRS_PORT_GEN_BASE(x) + 0x12)
+#define XRS_PORT_FWD_MASK(x) (XRS_PORT_GEN_BASE(x) + 0x14)
+#define XRS_PORT_VLAN_PRIO(x) (XRS_PORT_GEN_BASE(x) + 0x16)
+
+/* Port Configuration Registers - HSR/PRP */
+#define XRS_HSR_CFG(x) (XRS_PORT_HSR_BASE(x) + 0x0)
+
+/* Port Configuration Registers - PTP */
+#define XRS_PTP_RX_SYNC_DELAY_NS_LO(x) (XRS_PORT_PTP_BASE(x) + 0x2)
+#define XRS_PTP_RX_SYNC_DELAY_NS_HI(x) (XRS_PORT_PTP_BASE(x) + 0x4)
+#define XRS_PTP_RX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0xa)
+#define XRS_PTP_TX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0x12)
+
+/* Port Configuration Registers - Counter */
+#define XRS_CNT_CTRL(x) (XRS_PORT_CNT_BASE(x) + 0x0)
+#define XRS_RX_GOOD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x200)
+#define XRS_RX_GOOD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x202)
+#define XRS_RX_BAD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x204)
+#define XRS_RX_BAD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x206)
+#define XRS_RX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x208)
+#define XRS_RX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x20a)
+#define XRS_RX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x20c)
+#define XRS_RX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x20e)
+#define XRS_RX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x210)
+#define XRS_RX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x212)
+#define XRS_RX_UNDERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x214)
+#define XRS_RX_UNDERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x216)
+#define XRS_RX_FRAGMENTS_L (XRS_PORT_CNT_BASE(0) + 0x218)
+#define XRS_RX_FRAGMENTS_H (XRS_PORT_CNT_BASE(0) + 0x21a)
+#define XRS_RX_OVERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x21c)
+#define XRS_RX_OVERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x21e)
+#define XRS_RX_JABBER_L (XRS_PORT_CNT_BASE(0) + 0x220)
+#define XRS_RX_JABBER_H (XRS_PORT_CNT_BASE(0) + 0x222)
+#define XRS_RX_ERR_L (XRS_PORT_CNT_BASE(0) + 0x224)
+#define XRS_RX_ERR_H (XRS_PORT_CNT_BASE(0) + 0x226)
+#define XRS_RX_CRC_L (XRS_PORT_CNT_BASE(0) + 0x228)
+#define XRS_RX_CRC_H (XRS_PORT_CNT_BASE(0) + 0x22a)
+#define XRS_RX_64_L (XRS_PORT_CNT_BASE(0) + 0x22c)
+#define XRS_RX_64_H (XRS_PORT_CNT_BASE(0) + 0x22e)
+#define XRS_RX_65_127_L (XRS_PORT_CNT_BASE(0) + 0x230)
+#define XRS_RX_65_127_H (XRS_PORT_CNT_BASE(0) + 0x232)
+#define XRS_RX_128_255_L (XRS_PORT_CNT_BASE(0) + 0x234)
+#define XRS_RX_128_255_H (XRS_PORT_CNT_BASE(0) + 0x236)
+#define XRS_RX_256_511_L (XRS_PORT_CNT_BASE(0) + 0x238)
+#define XRS_RX_256_511_H (XRS_PORT_CNT_BASE(0) + 0x23a)
+#define XRS_RX_512_1023_L (XRS_PORT_CNT_BASE(0) + 0x23c)
+#define XRS_RX_512_1023_H (XRS_PORT_CNT_BASE(0) + 0x23e)
+#define XRS_RX_1024_1536_L (XRS_PORT_CNT_BASE(0) + 0x240)
+#define XRS_RX_1024_1536_H (XRS_PORT_CNT_BASE(0) + 0x242)
+#define XRS_RX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x244)
+#define XRS_RX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x246)
+#define XRS_RX_WRONGLAN_L (XRS_PORT_CNT_BASE(0) + 0x248)
+#define XRS_RX_WRONGLAN_H (XRS_PORT_CNT_BASE(0) + 0x24a)
+#define XRS_RX_DUPLICATE_L (XRS_PORT_CNT_BASE(0) + 0x24c)
+#define XRS_RX_DUPLICATE_H (XRS_PORT_CNT_BASE(0) + 0x24e)
+#define XRS_TX_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x280)
+#define XRS_TX_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x282)
+#define XRS_TX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x284)
+#define XRS_TX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x286)
+#define XRS_TX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x288)
+#define XRS_TX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x28a)
+#define XRS_TX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x28c)
+#define XRS_TX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x28e)
+#define XRS_TX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x290)
+#define XRS_TX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x292)
+#define XRS_PRIQ_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c0)
+#define XRS_PRIQ_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c2)
+#define XRS_EARLY_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c4)
+#define XRS_EARLY_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c6)
+
+/* Port Configuration Registers - Inbound Policy 0 - 15 */
+#define XRS_ETH_ADDR_CFG(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x0)
+#define XRS_ETH_ADDR_FWD_ALLOW(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x2)
+#define XRS_ETH_ADDR_FWD_MIRROR(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x4)
+#define XRS_ETH_ADDR_0(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x8)
+#define XRS_ETH_ADDR_1(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xa)
+#define XRS_ETH_ADDR_2(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xc)
+
+/* RTC Registers */
+#define XRS_CUR_NSEC0 (XRS_RTC_BASE + 0x1004)
+#define XRS_CUR_NSEC1 (XRS_RTC_BASE + 0x1006)
+#define XRS_CUR_SEC0 (XRS_RTC_BASE + 0x1008)
+#define XRS_CUR_SEC1 (XRS_RTC_BASE + 0x100a)
+#define XRS_CUR_SEC2 (XRS_RTC_BASE + 0x100c)
+#define XRS_TIME_CC0 (XRS_RTC_BASE + 0x1010)
+#define XRS_TIME_CC1 (XRS_RTC_BASE + 0x1012)
+#define XRS_TIME_CC2 (XRS_RTC_BASE + 0x1014)
+#define XRS_STEP_SIZE0 (XRS_RTC_BASE + 0x1020)
+#define XRS_STEP_SIZE1 (XRS_RTC_BASE + 0x1022)
+#define XRS_STEP_SIZE2 (XRS_RTC_BASE + 0x1024)
+#define XRS_ADJUST_NSEC0 (XRS_RTC_BASE + 0x1034)
+#define XRS_ADJUST_NSEC1 (XRS_RTC_BASE + 0x1036)
+#define XRS_ADJUST_SEC0 (XRS_RTC_BASE + 0x1038)
+#define XRS_ADJUST_SEC1 (XRS_RTC_BASE + 0x103a)
+#define XRS_ADJUST_SEC2 (XRS_RTC_BASE + 0x103c)
+#define XRS_TIME_CMD (XRS_RTC_BASE + 0x1040)
+
+/* Time Stamper Registers */
+#define XRS_TS_CTRL(x) (XRS_TS_BASE(x) + 0x1000)
+#define XRS_TS_INT_MASK(x) (XRS_TS_BASE(x) + 0x1008)
+#define XRS_TS_INT_STATUS(x) (XRS_TS_BASE(x) + 0x1010)
+#define XRS_TS_NSEC0(x) (XRS_TS_BASE(x) + 0x1104)
+#define XRS_TS_NSEC1(x) (XRS_TS_BASE(x) + 0x1106)
+#define XRS_TS_SEC0(x) (XRS_TS_BASE(x) + 0x1108)
+#define XRS_TS_SEC1(x) (XRS_TS_BASE(x) + 0x110a)
+#define XRS_TS_SEC2(x) (XRS_TS_BASE(x) + 0x110c)
+#define XRS_PNCT0(x) (XRS_TS_BASE(x) + 0x1110)
+#define XRS_PNCT1(x) (XRS_TS_BASE(x) + 0x1112)
+
+/* Switch Configuration Registers */
+#define XRS_SWITCH_GEN_BASE (XRS_SWITCH_CONF_BASE + 0x0)
+#define XRS_SWITCH_TS_BASE (XRS_SWITCH_CONF_BASE + 0x2000)
+#define XRS_SWITCH_VLAN_BASE (XRS_SWITCH_CONF_BASE + 0x4000)
+
+/* Switch Configuration Registers - General */
+#define XRS_GENERAL (XRS_SWITCH_GEN_BASE + 0x10)
+#define XRS_GENERAL_TIME_TRAILER BIT(9)
+#define XRS_GENERAL_MOD_SYNC BIT(10)
+#define XRS_GENERAL_CUT_THRU BIT(13)
+#define XRS_GENERAL_CLR_MAC_TBL BIT(14)
+#define XRS_GENERAL_RESET BIT(15)
+#define XRS_MT_CLEAR_MASK (XRS_SWITCH_GEN_BASE + 0x12)
+#define XRS_ADDRESS_AGING (XRS_SWITCH_GEN_BASE + 0x20)
+#define XRS_TS_CTRL_TX (XRS_SWITCH_GEN_BASE + 0x28)
+#define XRS_TS_CTRL_RX (XRS_SWITCH_GEN_BASE + 0x2a)
+#define XRS_INT_MASK (XRS_SWITCH_GEN_BASE + 0x2c)
+#define XRS_INT_STATUS (XRS_SWITCH_GEN_BASE + 0x2e)
+#define XRS_MAC_TABLE0 (XRS_SWITCH_GEN_BASE + 0x200)
+#define XRS_MAC_TABLE1 (XRS_SWITCH_GEN_BASE + 0x202)
+#define XRS_MAC_TABLE2 (XRS_SWITCH_GEN_BASE + 0x204)
+#define XRS_MAC_TABLE3 (XRS_SWITCH_GEN_BASE + 0x206)
+
+/* Switch Configuration Registers - Frame Timestamp */
+#define XRS_TX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x0)
+#define XRS_TX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x2)
+#define XRS_TX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x4)
+#define XRS_TX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x6)
+#define XRS_TX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
+#define XRS_RX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x200)
+#define XRS_RX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x202)
+#define XRS_RX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x204)
+#define XRS_RX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x206)
+#define XRS_RX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
+
+/* Switch Configuration Registers - VLAN */
+#define XRS_VLAN(v) (XRS_SWITCH_VLAN_BASE + 0x2 * (v))
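As a worked example of the address layout defined above: port registers live
at 0x200000 plus 0x10000 per port, so XRS_PORT_STATE(1) expands to
XRS_PORT_BASE(1) + 0x0 = 0x200000 + 0x10000 * 1 = 0x210000, and
XRS_PORT_FWD_MASK(1) to 0x210014.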
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index de50e8b9e656..ad04660b97b8 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,7 +33,6 @@ source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/aurora/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/cadence/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index f8f38dcb5f8a..1e7dc8a7762d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
-obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 06596fa1f9fe..1db6cfd2b55c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1585,10 +1585,9 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
int ret;
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- xdp->data = page_address(rx_info->page) + rx_info->page_offset;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_hard_start = page_address(rx_info->page);
- xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+ xdp_prepare_buff(xdp, page_address(rx_info->page),
+ rx_info->page_offset,
+ rx_ring->ena_bufs[0].len, false);
/* If for some reason we received a bigger packet than
* we expect, then we simply drop it
*/
@@ -1634,8 +1633,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
- xdp.rxq = &rx_ring->xdp_rxq;
- xdp.frame_sz = ENA_PAGE_SIZE;
+ xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
do {
xdp_verdict = XDP_PASS;
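For context, the two helpers this driver is converted to replace the
open-coded xdp_buff field assignments; a sketch of the calling pattern,
using the names from the hunks above:

	struct xdp_buff xdp;

	/* Once per receive queue / NAPI cycle */
	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);

	/* Once per packet; the last argument is whether metadata is valid */
	xdp_prepare_buff(&xdp, page_address(rx_info->page),
			 rx_info->page_offset, rx_ring->ena_bufs[0].len,
			 false);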
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2709a2db5657..99b6d5a9f1d9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2295,8 +2295,6 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_setup_tc = xgbe_setup_tc,
.ndo_fix_features = xgbe_fix_features,
.ndo_set_features = xgbe_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = xgbe_features_check,
};
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index efb33c078a3c..cec2018c84a9 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
- depends on X86_64 || ARM64 || COMPILE_TEST
depends on MACSEC || MACSEC=n
help
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
deleted file mode 100644
index 9ee30ea90bfa..000000000000
--- a/drivers/net/ethernet/aurora/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config NET_VENDOR_AURORA
- bool "Aurora VLSI devices"
- default y
- help
- If you have a network (Ethernet) device belonging to this class,
- say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- questions about Aurora devices. If you say Y, you will be asked
- for your specific device in the following questions.
-
-if NET_VENDOR_AURORA
-
-config AURORA_NB8800
- tristate "Aurora AU-NB8800 support"
- depends on HAS_DMA
- select PHYLIB
- help
- Support for the AU-NB8800 gigabit Ethernet controller.
-
-endif
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
deleted file mode 100644
index f3d599867619..000000000000
--- a/drivers/net/ethernet/aurora/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
deleted file mode 100644
index 5b20185cbd62..000000000000
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ /dev/null
@@ -1,1520 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
- *
- * Mostly rewritten, based on driver from Sigma Designs. Original
- * copyright notice below.
- *
- * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
- *
- * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
- */
-
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/cache.h>
-#include <linux/jiffies.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <asm/barrier.h>
-
-#include "nb8800.h"
-
-static void nb8800_tx_done(struct net_device *dev);
-static int nb8800_dma_stop(struct net_device *dev);
-
-static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
-{
- return readb_relaxed(priv->base + reg);
-}
-
-static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
-{
- return readl_relaxed(priv->base + reg);
-}
-
-static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
-{
- writeb_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
-{
- writew_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
-{
- writel_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
- u32 mask, u32 val)
-{
- u32 old = nb8800_readb(priv, reg);
- u32 new = (old & ~mask) | (val & mask);
-
- if (new != old)
- nb8800_writeb(priv, reg, new);
-}
-
-static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
- u32 mask, u32 val)
-{
- u32 old = nb8800_readl(priv, reg);
- u32 new = (old & ~mask) | (val & mask);
-
- if (new != old)
- nb8800_writel(priv, reg, new);
-}
-
-static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
- bool set)
-{
- nb8800_maskb(priv, reg, bits, set ? bits : 0);
-}
-
-static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
-{
- nb8800_maskb(priv, reg, bits, bits);
-}
-
-static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
-{
- nb8800_maskb(priv, reg, bits, 0);
-}
-
-static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
- bool set)
-{
- nb8800_maskl(priv, reg, bits, set ? bits : 0);
-}
-
-static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
-{
- nb8800_maskl(priv, reg, bits, bits);
-}
-
-static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
-{
- nb8800_maskl(priv, reg, bits, 0);
-}
-
-static int nb8800_mdio_wait(struct mii_bus *bus)
-{
- struct nb8800_priv *priv = bus->priv;
- u32 val;
-
- return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
- val, !(val & MDIO_CMD_GO), 1, 1000);
-}
-
-static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
-{
- struct nb8800_priv *priv = bus->priv;
- int err;
-
- err = nb8800_mdio_wait(bus);
- if (err)
- return err;
-
- nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
- udelay(10);
- nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
-
- return nb8800_mdio_wait(bus);
-}
-
-static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
-{
- struct nb8800_priv *priv = bus->priv;
- u32 val;
- int err;
-
- err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
- if (err)
- return err;
-
- val = nb8800_readl(priv, NB8800_MDIO_STS);
- if (val & MDIO_STS_ERR)
- return 0xffff;
-
- return val & 0xffff;
-}
-
-static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
-{
- u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
- MDIO_CMD_DATA(val) | MDIO_CMD_WR;
-
- return nb8800_mdio_cmd(bus, cmd);
-}
-
-static void nb8800_mac_tx(struct net_device *dev, bool enable)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
- cpu_relax();
-
- nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
-}
-
-static void nb8800_mac_rx(struct net_device *dev, bool enable)
-{
- nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
-}
-
-static void nb8800_mac_af(struct net_device *dev, bool enable)
-{
- nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
-}
-
-static void nb8800_start_rx(struct net_device *dev)
-{
- nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
-}
-
-static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
- struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
- int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
- dma_addr_t dma_addr;
- struct page *page;
- unsigned long offset;
- void *data;
-
- data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
- if (!data)
- return -ENOMEM;
-
- page = virt_to_head_page(data);
- offset = data - page_address(page);
-
- dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(&dev->dev, dma_addr)) {
- skb_free_frag(data);
- return -ENOMEM;
- }
-
- rxb->page = page;
- rxb->offset = offset;
- rxd->desc.s_addr = dma_addr;
-
- return 0;
-}
-
-static void nb8800_receive(struct net_device *dev, unsigned int i,
- unsigned int len)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
- struct page *page = priv->rx_bufs[i].page;
- int offset = priv->rx_bufs[i].offset;
- void *data = page_address(page) + offset;
- dma_addr_t dma = rxd->desc.s_addr;
- struct sk_buff *skb;
- unsigned int size;
- int err;
-
- size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
-
- skb = napi_alloc_skb(&priv->napi, size);
- if (!skb) {
- netdev_err(dev, "rx skb allocation failed\n");
- dev->stats.rx_dropped++;
- return;
- }
-
- if (len <= RX_COPYBREAK) {
- dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
- skb_put_data(skb, data, len);
- dma_sync_single_for_device(&dev->dev, dma, len,
- DMA_FROM_DEVICE);
- } else {
- err = nb8800_alloc_rx(dev, i, true);
- if (err) {
- netdev_err(dev, "rx buffer allocation failed\n");
- dev->stats.rx_dropped++;
- dev_kfree_skb(skb);
- return;
- }
-
- dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
- skb_put_data(skb, data, RX_COPYHDR);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- offset + RX_COPYHDR, len - RX_COPYHDR,
- RX_BUF_SIZE);
- }
-
- skb->protocol = eth_type_trans(skb, dev);
- napi_gro_receive(&priv->napi, skb);
-}
-
-static void nb8800_rx_error(struct net_device *dev, u32 report)
-{
- if (report & RX_LENGTH_ERR)
- dev->stats.rx_length_errors++;
-
- if (report & RX_FCS_ERR)
- dev->stats.rx_crc_errors++;
-
- if (report & RX_FIFO_OVERRUN)
- dev->stats.rx_fifo_errors++;
-
- if (report & RX_ALIGNMENT_ERROR)
- dev->stats.rx_frame_errors++;
-
- dev->stats.rx_errors++;
-}
-
-static int nb8800_poll(struct napi_struct *napi, int budget)
-{
- struct net_device *dev = napi->dev;
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd;
- unsigned int last = priv->rx_eoc;
- unsigned int next;
- int work = 0;
-
- nb8800_tx_done(dev);
-
-again:
- do {
- unsigned int len;
-
- next = (last + 1) % RX_DESC_COUNT;
-
- rxd = &priv->rx_descs[next];
-
- if (!rxd->report)
- break;
-
- len = RX_BYTES_TRANSFERRED(rxd->report);
-
- if (IS_RX_ERROR(rxd->report))
- nb8800_rx_error(dev, rxd->report);
- else
- nb8800_receive(dev, next, len);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-
- if (rxd->report & RX_MULTICAST_PKT)
- dev->stats.multicast++;
-
- rxd->report = 0;
- last = next;
- work++;
- } while (work < budget);
-
- if (work) {
- priv->rx_descs[last].desc.config |= DESC_EOC;
- wmb(); /* ensure new EOC is written before clearing old */
- priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
- priv->rx_eoc = last;
- nb8800_start_rx(dev);
- }
-
- if (work < budget) {
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
-
- /* If a packet arrived after we last checked but
- * before writing RX_ITR, the interrupt will be
- * delayed, so we retrieve it now.
- */
- if (priv->rx_descs[next].report)
- goto again;
-
- napi_complete_done(napi, work);
- }
-
- return work;
-}
-
-static void __nb8800_tx_dma_start(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_buf *txb;
- u32 txc_cr;
-
- txb = &priv->tx_bufs[priv->tx_queue];
- if (!txb->ready)
- return;
-
- txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
- if (txc_cr & TCR_EN)
- return;
-
- nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
- wmb(); /* ensure desc addr is written before starting DMA */
- nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
-
- priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
-}
-
-static void nb8800_tx_dma_start(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- spin_lock_irq(&priv->tx_lock);
- __nb8800_tx_dma_start(dev);
- spin_unlock_irq(&priv->tx_lock);
-}
-
-static void nb8800_tx_dma_start_irq(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- spin_lock(&priv->tx_lock);
- __nb8800_tx_dma_start(dev);
- spin_unlock(&priv->tx_lock);
-}
-
-static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_desc *txd;
- struct nb8800_tx_buf *txb;
- struct nb8800_dma_desc *desc;
- dma_addr_t dma_addr;
- unsigned int dma_len;
- unsigned int align;
- unsigned int next;
- bool xmit_more;
-
- if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
-
- align = (8 - (uintptr_t)skb->data) & 7;
-
- dma_len = skb->len - align;
- dma_addr = dma_map_single(&dev->dev, skb->data + align,
- dma_len, DMA_TO_DEVICE);
-
- if (dma_mapping_error(&dev->dev, dma_addr)) {
- netdev_err(dev, "tx dma mapping error\n");
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
-
- xmit_more = netdev_xmit_more();
- if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
- netif_stop_queue(dev);
- xmit_more = false;
- }
-
- next = priv->tx_next;
- txb = &priv->tx_bufs[next];
- txd = &priv->tx_descs[next];
- desc = &txd->desc[0];
-
- next = (next + 1) % TX_DESC_COUNT;
-
- if (align) {
- memcpy(txd->buf, skb->data, align);
-
- desc->s_addr =
- txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
- desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
- desc->config = DESC_BTS(2) | DESC_DS | align;
-
- desc++;
- }
-
- desc->s_addr = dma_addr;
- desc->n_addr = priv->tx_bufs[next].dma_desc;
- desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
-
- if (!xmit_more)
- desc->config |= DESC_EOC;
-
- txb->skb = skb;
- txb->dma_addr = dma_addr;
- txb->dma_len = dma_len;
-
- if (!priv->tx_chain) {
- txb->chain_len = 1;
- priv->tx_chain = txb;
- } else {
- priv->tx_chain->chain_len++;
- }
-
- netdev_sent_queue(dev, skb->len);
-
- priv->tx_next = next;
-
- if (!xmit_more) {
- smp_wmb();
- priv->tx_chain->ready = true;
- priv->tx_chain = NULL;
- nb8800_tx_dma_start(dev);
- }
-
- return NETDEV_TX_OK;
-}
-
-static void nb8800_tx_error(struct net_device *dev, u32 report)
-{
- if (report & TX_LATE_COLLISION)
- dev->stats.collisions++;
-
- if (report & TX_PACKET_DROPPED)
- dev->stats.tx_dropped++;
-
- if (report & TX_FIFO_UNDERRUN)
- dev->stats.tx_fifo_errors++;
-
- dev->stats.tx_errors++;
-}
-
-static void nb8800_tx_done(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int limit = priv->tx_next;
- unsigned int done = priv->tx_done;
- unsigned int packets = 0;
- unsigned int len = 0;
-
- while (done != limit) {
- struct nb8800_tx_desc *txd = &priv->tx_descs[done];
- struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
- struct sk_buff *skb;
-
- if (!txd->report)
- break;
-
- skb = txb->skb;
- len += skb->len;
-
- dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
- DMA_TO_DEVICE);
-
- if (IS_TX_ERROR(txd->report)) {
- nb8800_tx_error(dev, txd->report);
- kfree_skb(skb);
- } else {
- consume_skb(skb);
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
- dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
-
- txb->skb = NULL;
- txb->ready = false;
- txd->report = 0;
-
- done = (done + 1) % TX_DESC_COUNT;
- packets++;
- }
-
- if (packets) {
- smp_mb__before_atomic();
- atomic_add(packets, &priv->tx_free);
- netdev_completed_queue(dev, packets, len);
- netif_wake_queue(dev);
- priv->tx_done = done;
- }
-}
-
-static irqreturn_t nb8800_irq(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct nb8800_priv *priv = netdev_priv(dev);
- irqreturn_t ret = IRQ_NONE;
- u32 val;
-
- /* tx interrupt */
- val = nb8800_readl(priv, NB8800_TXC_SR);
- if (val) {
- nb8800_writel(priv, NB8800_TXC_SR, val);
-
- if (val & TSR_DI)
- nb8800_tx_dma_start_irq(dev);
-
- if (val & TSR_TI)
- napi_schedule_irqoff(&priv->napi);
-
- if (unlikely(val & TSR_DE))
- netdev_err(dev, "TX DMA error\n");
-
- /* should never happen with automatic status retrieval */
- if (unlikely(val & TSR_TO))
- netdev_err(dev, "TX Status FIFO overflow\n");
-
- ret = IRQ_HANDLED;
- }
-
- /* rx interrupt */
- val = nb8800_readl(priv, NB8800_RXC_SR);
- if (val) {
- nb8800_writel(priv, NB8800_RXC_SR, val);
-
- if (likely(val & (RSR_RI | RSR_DI))) {
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
- napi_schedule_irqoff(&priv->napi);
- }
-
- if (unlikely(val & RSR_DE))
- netdev_err(dev, "RX DMA error\n");
-
- /* should never happen with automatic status retrieval */
- if (unlikely(val & RSR_RO))
- netdev_err(dev, "RX Status FIFO overflow\n");
-
- ret = IRQ_HANDLED;
- }
-
- return ret;
-}
-
-static void nb8800_mac_config(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- bool gigabit = priv->speed == SPEED_1000;
- u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
- u32 mac_mode = 0;
- u32 slot_time;
- u32 phy_clk;
- u32 ict;
-
- if (!priv->duplex)
- mac_mode |= HALF_DUPLEX;
-
- if (gigabit) {
- if (phy_interface_is_rgmii(dev->phydev))
- mac_mode |= RGMII_MODE;
-
- mac_mode |= GMAC_MODE;
- phy_clk = 125000000;
-
- /* Should be 512 but register is only 8 bits */
- slot_time = 255;
- } else {
- phy_clk = 25000000;
- slot_time = 128;
- }
-
- ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
-
- nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
- nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
- nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
-}
-
-static void nb8800_pause_config(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- u32 rxcr;
-
- if (priv->pause_aneg) {
- if (!phydev || !phydev->link)
- return;
-
- priv->pause_rx = phydev->pause;
- priv->pause_tx = phydev->pause ^ phydev->asym_pause;
- }
-
- nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
-
- rxcr = nb8800_readl(priv, NB8800_RXC_CR);
- if (!!(rxcr & RCR_FL) == priv->pause_tx)
- return;
-
- if (netif_running(dev)) {
- napi_disable(&priv->napi);
- netif_tx_lock_bh(dev);
- nb8800_dma_stop(dev);
- nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
- nb8800_start_rx(dev);
- netif_tx_unlock_bh(dev);
- napi_enable(&priv->napi);
- } else {
- nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
- }
-}
-
-static void nb8800_link_reconfigure(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int change = 0;
-
- if (phydev->link) {
- if (phydev->speed != priv->speed) {
- priv->speed = phydev->speed;
- change = 1;
- }
-
- if (phydev->duplex != priv->duplex) {
- priv->duplex = phydev->duplex;
- change = 1;
- }
-
- if (change)
- nb8800_mac_config(dev);
-
- nb8800_pause_config(dev);
- }
-
- if (phydev->link != priv->link) {
- priv->link = phydev->link;
- change = 1;
- }
-
- if (change)
- phy_print_status(phydev);
-}
-
-static void nb8800_update_mac_addr(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
-
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
-}
-
-static int nb8800_set_mac_address(struct net_device *dev, void *addr)
-{
- struct sockaddr *sock = addr;
-
- if (netif_running(dev))
- return -EBUSY;
-
- ether_addr_copy(dev->dev_addr, sock->sa_data);
- nb8800_update_mac_addr(dev);
-
- return 0;
-}
-
-static void nb8800_mc_init(struct net_device *dev, int val)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- nb8800_writeb(priv, NB8800_MC_INIT, val);
- readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
- 1, 1000);
-}
-
-static void nb8800_set_rx_mode(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i;
-
- if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
- nb8800_mac_af(dev, false);
- return;
- }
-
- nb8800_mac_af(dev, true);
- nb8800_mc_init(dev, 0);
-
- netdev_for_each_mc_addr(ha, dev) {
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
-
- nb8800_mc_init(dev, 0xff);
- }
-}
-
-#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
-#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
-
-static void nb8800_dma_free(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int i;
-
- if (priv->rx_bufs) {
- for (i = 0; i < RX_DESC_COUNT; i++)
- if (priv->rx_bufs[i].page)
- put_page(priv->rx_bufs[i].page);
-
- kfree(priv->rx_bufs);
- priv->rx_bufs = NULL;
- }
-
- if (priv->tx_bufs) {
- for (i = 0; i < TX_DESC_COUNT; i++)
- kfree_skb(priv->tx_bufs[i].skb);
-
- kfree(priv->tx_bufs);
- priv->tx_bufs = NULL;
- }
-
- if (priv->rx_descs) {
- dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
- priv->rx_desc_dma);
- priv->rx_descs = NULL;
- }
-
- if (priv->tx_descs) {
- dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
- priv->tx_desc_dma);
- priv->tx_descs = NULL;
- }
-}
-
-static void nb8800_dma_reset(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd;
- struct nb8800_tx_desc *txd;
- unsigned int i;
-
- for (i = 0; i < RX_DESC_COUNT; i++) {
- dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
-
- rxd = &priv->rx_descs[i];
- rxd->desc.n_addr = rx_dma + sizeof(*rxd);
- rxd->desc.r_addr =
- rx_dma + offsetof(struct nb8800_rx_desc, report);
- rxd->desc.config = priv->rx_dma_config;
- rxd->report = 0;
- }
-
- rxd->desc.n_addr = priv->rx_desc_dma;
- rxd->desc.config |= DESC_EOC;
-
- priv->rx_eoc = RX_DESC_COUNT - 1;
-
- for (i = 0; i < TX_DESC_COUNT; i++) {
- struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
- dma_addr_t r_dma = txb->dma_desc +
- offsetof(struct nb8800_tx_desc, report);
-
- txd = &priv->tx_descs[i];
- txd->desc[0].r_addr = r_dma;
- txd->desc[1].r_addr = r_dma;
- txd->report = 0;
- }
-
- priv->tx_next = 0;
- priv->tx_queue = 0;
- priv->tx_done = 0;
- atomic_set(&priv->tx_free, TX_DESC_COUNT);
-
- nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
-
- wmb(); /* ensure all setup is written before starting */
-}
-
-static int nb8800_dma_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int n_rx = RX_DESC_COUNT;
- unsigned int n_tx = TX_DESC_COUNT;
- unsigned int i;
- int err;
-
- priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
- &priv->rx_desc_dma, GFP_KERNEL);
- if (!priv->rx_descs)
- goto err_out;
-
- priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
- if (!priv->rx_bufs)
- goto err_out;
-
- for (i = 0; i < n_rx; i++) {
- err = nb8800_alloc_rx(dev, i, false);
- if (err)
- goto err_out;
- }
-
- priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
- &priv->tx_desc_dma, GFP_KERNEL);
- if (!priv->tx_descs)
- goto err_out;
-
- priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
- if (!priv->tx_bufs)
- goto err_out;
-
- for (i = 0; i < n_tx; i++)
- priv->tx_bufs[i].dma_desc =
- priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
-
- nb8800_dma_reset(dev);
-
- return 0;
-
-err_out:
- nb8800_dma_free(dev);
-
- return -ENOMEM;
-}
-
-static int nb8800_dma_stop(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
- struct nb8800_tx_desc *txd = &priv->tx_descs[0];
- int retry = 5;
- u32 txcr;
- u32 rxcr;
- int err;
- unsigned int i;
-
- /* wait for tx to finish */
- err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
- !(txcr & TCR_EN) &&
- priv->tx_done == priv->tx_next,
- 1000, 1000000);
- if (err)
- return err;
-
- /* The rx DMA only stops if it reaches the end of chain.
- * To make this happen, we set the EOC flag on all rx
- * descriptors, put the device in loopback mode, and send
- * a few dummy frames. The interrupt handler will ignore
- * these since NAPI is disabled and no real frames are in
- * the tx queue.
- */
-
- for (i = 0; i < RX_DESC_COUNT; i++)
- priv->rx_descs[i].desc.config |= DESC_EOC;
-
- txd->desc[0].s_addr =
- txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
- txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
- memset(txd->buf, 0, sizeof(txd->buf));
-
- nb8800_mac_af(dev, false);
- nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
-
- do {
- nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
- wmb();
- nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
-
- err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
- rxcr, !(rxcr & RCR_EN),
- 1000, 100000);
- } while (err && --retry);
-
- nb8800_mac_af(dev, true);
- nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
- nb8800_dma_reset(dev);
-
- return retry ? 0 : -ETIMEDOUT;
-}
-
-static void nb8800_pause_adv(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return;
-
- phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx);
-}
-
-static int nb8800_open(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev;
- int err;
-
- /* clear any pending interrupts */
- nb8800_writel(priv, NB8800_RXC_SR, 0xf);
- nb8800_writel(priv, NB8800_TXC_SR, 0xf);
-
- err = nb8800_dma_init(dev);
- if (err)
- return err;
-
- err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
- if (err)
- goto err_free_dma;
-
- nb8800_mac_rx(dev, true);
- nb8800_mac_tx(dev, true);
-
- phydev = of_phy_connect(dev, priv->phy_node,
- nb8800_link_reconfigure, 0,
- priv->phy_mode);
- if (!phydev) {
- err = -ENODEV;
- goto err_free_irq;
- }
-
- nb8800_pause_adv(dev);
-
- netdev_reset_queue(dev);
- napi_enable(&priv->napi);
- netif_start_queue(dev);
-
- nb8800_start_rx(dev);
- phy_start(phydev);
-
- return 0;
-
-err_free_irq:
- free_irq(dev->irq, dev);
-err_free_dma:
- nb8800_dma_free(dev);
-
- return err;
-}
-
-static int nb8800_stop(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- phy_stop(phydev);
-
- netif_stop_queue(dev);
- napi_disable(&priv->napi);
-
- nb8800_dma_stop(dev);
- nb8800_mac_rx(dev, false);
- nb8800_mac_tx(dev, false);
-
- phy_disconnect(phydev);
-
- free_irq(dev->irq, dev);
-
- nb8800_dma_free(dev);
-
- return 0;
-}
-
-static const struct net_device_ops nb8800_netdev_ops = {
- .ndo_open = nb8800_open,
- .ndo_stop = nb8800_stop,
- .ndo_start_xmit = nb8800_xmit,
- .ndo_set_mac_address = nb8800_set_mac_address,
- .ndo_set_rx_mode = nb8800_set_rx_mode,
- .ndo_do_ioctl = phy_do_ioctl,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void nb8800_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pp)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- pp->autoneg = priv->pause_aneg;
- pp->rx_pause = priv->pause_rx;
- pp->tx_pause = priv->pause_tx;
-}
-
-static int nb8800_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pp)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- priv->pause_aneg = pp->autoneg;
- priv->pause_rx = pp->rx_pause;
- priv->pause_tx = pp->tx_pause;
-
- nb8800_pause_adv(dev);
-
- if (!priv->pause_aneg)
- nb8800_pause_config(dev);
- else if (phydev)
- phy_start_aneg(phydev);
-
- return 0;
-}
-
-static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
- "rx_bytes_ok",
- "rx_frames_ok",
- "rx_undersize_frames",
- "rx_fragment_frames",
- "rx_64_byte_frames",
- "rx_127_byte_frames",
- "rx_255_byte_frames",
- "rx_511_byte_frames",
- "rx_1023_byte_frames",
- "rx_max_size_frames",
- "rx_oversize_frames",
- "rx_bad_fcs_frames",
- "rx_broadcast_frames",
- "rx_multicast_frames",
- "rx_control_frames",
- "rx_pause_frames",
- "rx_unsup_control_frames",
- "rx_align_error_frames",
- "rx_overrun_frames",
- "rx_jabber_frames",
- "rx_bytes",
- "rx_frames",
-
- "tx_bytes_ok",
- "tx_frames_ok",
- "tx_64_byte_frames",
- "tx_127_byte_frames",
- "tx_255_byte_frames",
- "tx_511_byte_frames",
- "tx_1023_byte_frames",
- "tx_max_size_frames",
- "tx_oversize_frames",
- "tx_broadcast_frames",
- "tx_multicast_frames",
- "tx_control_frames",
- "tx_pause_frames",
- "tx_underrun_frames",
- "tx_single_collision_frames",
- "tx_multi_collision_frames",
- "tx_deferred_collision_frames",
- "tx_late_collision_frames",
- "tx_excessive_collision_frames",
- "tx_bytes",
- "tx_frames",
- "tx_collisions",
-};
-
-#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
-
-static int nb8800_get_sset_count(struct net_device *dev, int sset)
-{
- if (sset == ETH_SS_STATS)
- return NB8800_NUM_STATS;
-
- return -EOPNOTSUPP;
-}
-
-static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
-{
- if (sset == ETH_SS_STATS)
- memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
-}
-
-static u32 nb8800_read_stat(struct net_device *dev, int index)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- nb8800_writeb(priv, NB8800_STAT_INDEX, index);
-
- return nb8800_readl(priv, NB8800_STAT_DATA);
-}
-
-static void nb8800_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *st)
-{
- unsigned int i;
- u32 rx, tx;
-
- for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
- rx = nb8800_read_stat(dev, i);
- tx = nb8800_read_stat(dev, i | 0x80);
- st[i] = rx;
- st[i + NB8800_NUM_STATS / 2] = tx;
- }
-}
-
-static const struct ethtool_ops nb8800_ethtool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = nb8800_get_pauseparam,
- .set_pauseparam = nb8800_set_pauseparam,
- .get_sset_count = nb8800_get_sset_count,
- .get_strings = nb8800_get_strings,
- .get_ethtool_stats = nb8800_get_ethtool_stats,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static int nb8800_hw_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- u32 val;
-
- val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
- nb8800_writeb(priv, NB8800_TX_CTL1, val);
-
- /* Collision retry count */
- nb8800_writeb(priv, NB8800_TX_CTL2, 5);
-
- val = RX_PAD_STRIP | RX_AF_EN;
- nb8800_writeb(priv, NB8800_RX_CTL, val);
-
- /* Chosen by fair dice roll */
- nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
-
- /* TX cycles per deferral period */
- nb8800_writeb(priv, NB8800_TX_SDP, 12);
-
- /* The following three threshold values have been
- * experimentally determined for good results.
- */
-
- /* RX/TX FIFO threshold for partial empty (64-bit entries) */
- nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
-
- /* RX/TX FIFO threshold for partial full (64-bit entries) */
- nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
-
- /* Buffer size for transmit (64-bit entries) */
- nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
-
- /* Configure tx DMA */
-
- val = nb8800_readl(priv, NB8800_TXC_CR);
- val &= TCR_LE; /* keep endian setting */
- val |= TCR_DM; /* DMA descriptor mode */
- val |= TCR_RS; /* automatically store tx status */
- val |= TCR_DIE; /* interrupt on DMA chain completion */
- val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */
- val |= TCR_BTS(2); /* 32-byte bus transaction size */
- nb8800_writel(priv, NB8800_TXC_CR, val);
-
- /* TX complete interrupt after 10 ms or 7 frames (see above) */
- val = clk_get_rate(priv->clk) / 100;
- nb8800_writel(priv, NB8800_TX_ITR, val);
-
- /* Configure rx DMA */
-
- val = nb8800_readl(priv, NB8800_RXC_CR);
- val &= RCR_LE; /* keep endian setting */
- val |= RCR_DM; /* DMA descriptor mode */
- val |= RCR_RS; /* automatically store rx status */
- val |= RCR_DIE; /* interrupt at end of DMA chain */
- val |= RCR_RFI(7); /* interrupt after 7 frames received */
- val |= RCR_BTS(2); /* 32-byte bus transaction size */
- nb8800_writel(priv, NB8800_RXC_CR, val);
-
- /* The rx interrupt can fire before the DMA has completed
- * unless a small delay is added. 50 us is hopefully enough.
- */
- priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
-
- /* In NAPI poll mode we want to disable interrupts, but the
- * hardware does not permit this. Delay 10 ms instead.
- */
- priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
-
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
-
- priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
-
- /* Flow control settings */
-
- /* Pause time of 0.1 ms */
- val = 100000 / 512;
- nb8800_writeb(priv, NB8800_PQ1, val >> 8);
- nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
-
- /* Auto-negotiate by default */
- priv->pause_aneg = true;
- priv->pause_rx = true;
- priv->pause_tx = true;
-
- nb8800_mc_init(dev, 0);
-
- return 0;
-}
-
-static int nb8800_tangox_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- u32 pad_mode = PAD_MODE_MII;
-
- switch (priv->phy_mode) {
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_GMII:
- pad_mode = PAD_MODE_MII;
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- pad_mode = PAD_MODE_RGMII;
- break;
-
- default:
- dev_err(dev->dev.parent, "unsupported phy mode %s\n",
- phy_modes(priv->phy_mode));
- return -EINVAL;
- }
-
- nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
-
- return 0;
-}
-
-static int nb8800_tangox_reset(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int clk_div;
-
- nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
- usleep_range(1000, 10000);
- nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
-
- wmb(); /* ensure reset is cleared before proceeding */
-
- clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
- nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
-
- return 0;
-}
-
-static const struct nb8800_ops nb8800_tangox_ops = {
- .init = nb8800_tangox_init,
- .reset = nb8800_tangox_reset,
-};
-
-static int nb8800_tango4_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int err;
-
- err = nb8800_tangox_init(dev);
- if (err)
- return err;
-
- /* On tango4 interrupt on DMA completion per frame works and gives
- * better performance despite generating more rx interrupts.
- */
-
- /* Disable unnecessary interrupt on rx completion */
- nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
-
- /* Request interrupt on descriptor DMA completion */
- priv->rx_dma_config |= DESC_ID;
-
- return 0;
-}
-
-static const struct nb8800_ops nb8800_tango4_ops = {
- .init = nb8800_tango4_init,
- .reset = nb8800_tangox_reset,
-};
-
-static const struct of_device_id nb8800_dt_ids[] = {
- {
- .compatible = "aurora,nb8800",
- },
- {
- .compatible = "sigma,smp8642-ethernet",
- .data = &nb8800_tangox_ops,
- },
- {
- .compatible = "sigma,smp8734-ethernet",
- .data = &nb8800_tango4_ops,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
-
-static int nb8800_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- const struct nb8800_ops *ops = NULL;
- struct nb8800_priv *priv;
- struct resource *res;
- struct net_device *dev;
- struct mii_bus *bus;
- const unsigned char *mac;
- void __iomem *base;
- int irq;
- int ret;
-
- match = of_match_device(nb8800_dt_ids, &pdev->dev);
- if (match)
- ops = match->data;
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);
-
- dev = alloc_etherdev(sizeof(*priv));
- if (!dev)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- priv = netdev_priv(dev);
- priv->base = base;
-
- ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
- if (ret)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- ret = PTR_ERR(priv->clk);
- goto err_free_dev;
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_free_dev;
-
- spin_lock_init(&priv->tx_lock);
-
- if (ops && ops->reset) {
- ret = ops->reset(dev);
- if (ret)
- goto err_disable_clk;
- }
-
- bus = devm_mdiobus_alloc(&pdev->dev);
- if (!bus) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
-
- bus->name = "nb8800-mii";
- bus->read = nb8800_mdio_read;
- bus->write = nb8800_mdio_write;
- bus->parent = &pdev->dev;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
- (unsigned long)res->start);
- bus->priv = priv;
-
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret) {
- dev_err(&pdev->dev, "failed to register MII bus\n");
- goto err_disable_clk;
- }
-
- if (of_phy_is_fixed_link(pdev->dev.of_node)) {
- ret = of_phy_register_fixed_link(pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "bad fixed-link spec\n");
- goto err_free_bus;
- }
- priv->phy_node = of_node_get(pdev->dev.of_node);
- }
-
- if (!priv->phy_node)
- priv->phy_node = of_parse_phandle(pdev->dev.of_node,
- "phy-handle", 0);
-
- if (!priv->phy_node) {
- dev_err(&pdev->dev, "no PHY specified\n");
- ret = -ENODEV;
- goto err_free_bus;
- }
-
- priv->mii_bus = bus;
-
- ret = nb8800_hw_init(dev);
- if (ret)
- goto err_deregister_fixed_link;
-
- if (ops && ops->init) {
- ret = ops->init(dev);
- if (ret)
- goto err_deregister_fixed_link;
- }
-
- dev->netdev_ops = &nb8800_netdev_ops;
- dev->ethtool_ops = &nb8800_ethtool_ops;
- dev->flags |= IFF_MULTICAST;
- dev->irq = irq;
-
- mac = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(mac))
- ether_addr_copy(dev->dev_addr, mac);
-
- if (!is_valid_ether_addr(dev->dev_addr))
- eth_hw_addr_random(dev);
-
- nb8800_update_mac_addr(dev);
-
- netif_carrier_off(dev);
-
- ret = register_netdev(dev);
- if (ret) {
- netdev_err(dev, "failed to register netdev\n");
- goto err_free_dma;
- }
-
- netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);
-
- netdev_info(dev, "MAC address %pM\n", dev->dev_addr);
-
- return 0;
-
-err_free_dma:
- nb8800_dma_free(dev);
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
-err_free_bus:
- of_node_put(priv->phy_node);
- mdiobus_unregister(bus);
-err_disable_clk:
- clk_disable_unprepare(priv->clk);
-err_free_dev:
- free_netdev(dev);
-
- return ret;
-}
-
-static int nb8800_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct nb8800_priv *priv = netdev_priv(ndev);
-
- unregister_netdev(ndev);
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
- of_node_put(priv->phy_node);
-
- mdiobus_unregister(priv->mii_bus);
-
- clk_disable_unprepare(priv->clk);
-
- nb8800_dma_free(ndev);
- free_netdev(ndev);
-
- return 0;
-}
-
-static struct platform_driver nb8800_driver = {
- .driver = {
- .name = "nb8800",
- .of_match_table = nb8800_dt_ids,
- },
- .probe = nb8800_probe,
- .remove = nb8800_remove,
-};
-
-module_platform_driver(nb8800_driver);
-
-MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
-MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
deleted file mode 100644
index 40941fb6065b..000000000000
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NB8800_H_
-#define _NB8800_H_
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/phy.h>
-#include <linux/clk.h>
-#include <linux/bitops.h>
-
-#define RX_DESC_COUNT 256
-#define TX_DESC_COUNT 256
-
-#define NB8800_DESC_LOW 4
-
-#define RX_BUF_SIZE 1552
-
-#define RX_COPYBREAK 256
-#define RX_COPYHDR 128
-
-#define MAX_MDC_CLOCK 2500000
-
-/* Stargate Solutions SSN8800 core registers */
-#define NB8800_TX_CTL1 0x000
-#define TX_TPD BIT(5)
-#define TX_APPEND_FCS BIT(4)
-#define TX_PAD_EN BIT(3)
-#define TX_RETRY_EN BIT(2)
-#define TX_EN BIT(0)
-
-#define NB8800_TX_CTL2 0x001
-
-#define NB8800_RX_CTL 0x004
-#define RX_BC_DISABLE BIT(7)
-#define RX_RUNT BIT(6)
-#define RX_AF_EN BIT(5)
-#define RX_PAUSE_EN BIT(3)
-#define RX_SEND_CRC BIT(2)
-#define RX_PAD_STRIP BIT(1)
-#define RX_EN BIT(0)
-
-#define NB8800_RANDOM_SEED 0x008
-#define NB8800_TX_SDP 0x14
-#define NB8800_TX_TPDP1 0x18
-#define NB8800_TX_TPDP2 0x19
-#define NB8800_SLOT_TIME 0x1c
-
-#define NB8800_MDIO_CMD 0x020
-#define MDIO_CMD_GO BIT(31)
-#define MDIO_CMD_WR BIT(26)
-#define MDIO_CMD_ADDR(x) ((x) << 21)
-#define MDIO_CMD_REG(x) ((x) << 16)
-#define MDIO_CMD_DATA(x) ((x) << 0)
-
-#define NB8800_MDIO_STS 0x024
-#define MDIO_STS_ERR BIT(31)
-
-#define NB8800_MC_ADDR(i) (0x028 + (i))
-#define NB8800_MC_INIT 0x02e
-#define NB8800_UC_ADDR(i) (0x03c + (i))
-
-#define NB8800_MAC_MODE 0x044
-#define RGMII_MODE BIT(7)
-#define HALF_DUPLEX BIT(4)
-#define BURST_EN BIT(3)
-#define LOOPBACK_EN BIT(2)
-#define GMAC_MODE BIT(0)
-
-#define NB8800_IC_THRESHOLD 0x050
-#define NB8800_PE_THRESHOLD 0x051
-#define NB8800_PF_THRESHOLD 0x052
-#define NB8800_TX_BUFSIZE 0x054
-#define NB8800_FIFO_CTL 0x056
-#define NB8800_PQ1 0x060
-#define NB8800_PQ2 0x061
-#define NB8800_SRC_ADDR(i) (0x06a + (i))
-#define NB8800_STAT_DATA 0x078
-#define NB8800_STAT_INDEX 0x07c
-#define NB8800_STAT_CLEAR 0x07d
-
-#define NB8800_SLEEP_MODE 0x07e
-#define SLEEP_MODE BIT(0)
-
-#define NB8800_WAKEUP 0x07f
-#define WAKEUP BIT(0)
-
-/* Aurora NB8800 host interface registers */
-#define NB8800_TXC_CR 0x100
-#define TCR_LK BIT(12)
-#define TCR_DS BIT(11)
-#define TCR_BTS(x) (((x) & 0x7) << 8)
-#define TCR_DIE BIT(7)
-#define TCR_TFI(x) (((x) & 0x7) << 4)
-#define TCR_LE BIT(3)
-#define TCR_RS BIT(2)
-#define TCR_DM BIT(1)
-#define TCR_EN BIT(0)
-
-#define NB8800_TXC_SR 0x104
-#define TSR_DE BIT(3)
-#define TSR_DI BIT(2)
-#define TSR_TO BIT(1)
-#define TSR_TI BIT(0)
-
-#define NB8800_TX_SAR 0x108
-#define NB8800_TX_DESC_ADDR 0x10c
-
-#define NB8800_TX_REPORT_ADDR 0x110
-#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff)
-#define TX_FIRST_DEFERRAL BIT(7)
-#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf)
-#define TX_LATE_COLLISION BIT(2)
-#define TX_PACKET_DROPPED BIT(1)
-#define TX_FIFO_UNDERRUN BIT(0)
-#define IS_TX_ERROR(r) ((r) & 0x07)
-
-#define NB8800_TX_FIFO_SR 0x114
-#define NB8800_TX_ITR 0x118
-
-#define NB8800_RXC_CR 0x200
-#define RCR_FL BIT(13)
-#define RCR_LK BIT(12)
-#define RCR_DS BIT(11)
-#define RCR_BTS(x) (((x) & 7) << 8)
-#define RCR_DIE BIT(7)
-#define RCR_RFI(x) (((x) & 7) << 4)
-#define RCR_LE BIT(3)
-#define RCR_RS BIT(2)
-#define RCR_DM BIT(1)
-#define RCR_EN BIT(0)
-
-#define NB8800_RXC_SR 0x204
-#define RSR_DE BIT(3)
-#define RSR_DI BIT(2)
-#define RSR_RO BIT(1)
-#define RSR_RI BIT(0)
-
-#define NB8800_RX_SAR 0x208
-#define NB8800_RX_DESC_ADDR 0x20c
-
-#define NB8800_RX_REPORT_ADDR 0x210
-#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF)
-#define RX_MULTICAST_PKT BIT(9)
-#define RX_BROADCAST_PKT BIT(8)
-#define RX_LENGTH_ERR BIT(7)
-#define RX_FCS_ERR BIT(6)
-#define RX_RUNT_PKT BIT(5)
-#define RX_FIFO_OVERRUN BIT(4)
-#define RX_LATE_COLLISION BIT(3)
-#define RX_ALIGNMENT_ERROR BIT(2)
-#define RX_ERROR_MASK 0xfc
-#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK)
-
-#define NB8800_RX_FIFO_SR 0x214
-#define NB8800_RX_ITR 0x218
-
-/* Sigma Designs SMP86xx additional registers */
-#define NB8800_TANGOX_PAD_MODE 0x400
-#define PAD_MODE_MASK 0x7
-#define PAD_MODE_MII 0x0
-#define PAD_MODE_RGMII 0x1
-#define PAD_MODE_GTX_CLK_INV BIT(3)
-#define PAD_MODE_GTX_CLK_DELAY BIT(4)
-
-#define NB8800_TANGOX_MDIO_CLKDIV 0x420
-#define NB8800_TANGOX_RESET 0x424
-
-/* Hardware DMA descriptor */
-struct nb8800_dma_desc {
- u32 s_addr; /* start address */
- u32 n_addr; /* next descriptor address */
- u32 r_addr; /* report address */
- u32 config;
-} __aligned(8);
-
-#define DESC_ID BIT(23)
-#define DESC_EOC BIT(22)
-#define DESC_EOF BIT(21)
-#define DESC_LK BIT(20)
-#define DESC_DS BIT(19)
-#define DESC_BTS(x) (((x) & 0x7) << 16)
-
-/* DMA descriptor and associated data for rx.
- * Allocated from coherent memory.
- */
-struct nb8800_rx_desc {
- /* DMA descriptor */
- struct nb8800_dma_desc desc;
-
- /* Status report filled in by hardware */
- u32 report;
-};
-
-/* Address of buffer on rx ring */
-struct nb8800_rx_buf {
- struct page *page;
- unsigned long offset;
-};
-
-/* DMA descriptors and associated data for tx.
- * Allocated from coherent memory.
- */
-struct nb8800_tx_desc {
- /* DMA descriptor. The second descriptor is used if packet
- * data is unaligned.
- */
- struct nb8800_dma_desc desc[2];
-
- /* Status report filled in by hardware */
- u32 report;
-
- /* Bounce buffer for initial unaligned part of packet */
- u8 buf[8] __aligned(8);
-};
-
-/* Packet in tx queue */
-struct nb8800_tx_buf {
- /* Currently queued skb */
- struct sk_buff *skb;
-
- /* DMA address of the first descriptor */
- dma_addr_t dma_desc;
-
- /* DMA address of packet data */
- dma_addr_t dma_addr;
-
- /* Length of DMA mapping, less than skb->len if alignment
- * buffer is used.
- */
- unsigned int dma_len;
-
- /* Number of packets in chain starting here */
- unsigned int chain_len;
-
- /* Packet chain ready to be submitted to hardware */
- bool ready;
-};
-
-struct nb8800_priv {
- struct napi_struct napi;
-
- void __iomem *base;
-
- /* RX DMA descriptors */
- struct nb8800_rx_desc *rx_descs;
-
- /* RX buffers referenced by DMA descriptors */
- struct nb8800_rx_buf *rx_bufs;
-
- /* Current end of chain */
- u32 rx_eoc;
-
- /* Value for rx interrupt time register in NAPI interrupt mode */
- u32 rx_itr_irq;
-
- /* Value for rx interrupt time register in NAPI poll mode */
- u32 rx_itr_poll;
-
- /* Value for config field of rx DMA descriptors */
- u32 rx_dma_config;
-
- /* TX DMA descriptors */
- struct nb8800_tx_desc *tx_descs;
-
- /* TX packet queue */
- struct nb8800_tx_buf *tx_bufs;
-
- /* Number of free tx queue entries */
- atomic_t tx_free;
-
- /* First free tx queue entry */
- u32 tx_next;
-
- /* Next buffer to transmit */
- u32 tx_queue;
-
- /* Start of current packet chain */
- struct nb8800_tx_buf *tx_chain;
-
- /* Next buffer to reclaim */
- u32 tx_done;
-
- /* Lock for DMA activation */
- spinlock_t tx_lock;
-
- struct mii_bus *mii_bus;
- struct device_node *phy_node;
-
- /* PHY connection type from DT */
- phy_interface_t phy_mode;
-
- /* Current link status */
- int speed;
- int duplex;
- int link;
-
- /* Pause settings */
- bool pause_aneg;
- bool pause_rx;
- bool pause_tx;
-
- /* DMA base address of rx descriptors, see rx_descs above */
- dma_addr_t rx_desc_dma;
-
- /* DMA base address of tx descriptors, see tx_descs above */
- dma_addr_t tx_desc_dma;
-
- struct clk *clk;
-};
-
-struct nb8800_ops {
- int (*init)(struct net_device *dev);
- int (*reset)(struct net_device *dev);
-};
-
-#endif /* _NB8800_H_ */
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7b79528d6eed..4bdf8fbe75a6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -174,7 +174,6 @@ config BGMAC_BCMA
config BGMAC_PLATFORM
tristate "Broadcom iProc GBit platform support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on OF
select BGMAC
select PHYLIB
select FIXED_PHY
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 916824cca3fd..fd8767213165 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -220,7 +220,7 @@ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
/*
* refill rx queue
*/
-static int bcm_enet_refill_rx(struct net_device *dev)
+static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
struct bcm_enet_priv *priv;
@@ -228,26 +228,29 @@ static int bcm_enet_refill_rx(struct net_device *dev)
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm_enet_desc *desc;
- struct sk_buff *skb;
- dma_addr_t p;
int desc_idx;
u32 len_stat;
desc_idx = priv->rx_dirty_desc;
desc = &priv->rx_desc_cpu[desc_idx];
- if (!priv->rx_skb[desc_idx]) {
- skb = netdev_alloc_skb(dev, priv->rx_skb_size);
- if (!skb)
+ if (!priv->rx_buf[desc_idx]) {
+ void *buf;
+
+ if (likely(napi_mode))
+ buf = napi_alloc_frag(priv->rx_frag_size);
+ else
+ buf = netdev_alloc_frag(priv->rx_frag_size);
+ if (unlikely(!buf))
break;
- priv->rx_skb[desc_idx] = skb;
- p = dma_map_single(&priv->pdev->dev, skb->data,
- priv->rx_skb_size,
- DMA_FROM_DEVICE);
- desc->address = p;
+ priv->rx_buf[desc_idx] = buf;
+ desc->address = dma_map_single(&priv->pdev->dev,
+ buf + priv->rx_buf_offset,
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
}
- len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+ len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
len_stat |= DMADESC_OWNER_MASK;
if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
@@ -287,7 +290,7 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, false);
spin_unlock(&priv->rx_lock);
}
@@ -297,10 +300,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
struct bcm_enet_priv *priv;
+ struct list_head rx_list;
struct device *kdev;
int processed;
priv = netdev_priv(dev);
+ INIT_LIST_HEAD(&rx_list);
kdev = &priv->pdev->dev;
processed = 0;
@@ -315,6 +320,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
int desc_idx;
u32 len_stat;
unsigned int len;
+ void *buf;
desc_idx = priv->rx_curr_desc;
desc = &priv->rx_desc_cpu[desc_idx];
@@ -333,7 +339,6 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_curr_desc++;
if (priv->rx_curr_desc == priv->rx_ring_size)
priv->rx_curr_desc = 0;
- priv->rx_desc_count--;
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
@@ -360,16 +365,14 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
/* valid packet */
- skb = priv->rx_skb[desc_idx];
+ buf = priv->rx_buf[desc_idx];
len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
/* don't include FCS */
len -= 4;
if (len < copybreak) {
- struct sk_buff *nskb;
-
- nskb = napi_alloc_skb(&priv->napi, len);
- if (!nskb) {
+ skb = napi_alloc_skb(&priv->napi, len);
+ if (unlikely(!skb)) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
continue;
@@ -377,26 +380,36 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
dma_sync_single_for_cpu(kdev, desc->address,
len, DMA_FROM_DEVICE);
- memcpy(nskb->data, skb->data, len);
+ memcpy(skb->data, buf + priv->rx_buf_offset, len);
dma_sync_single_for_device(kdev, desc->address,
len, DMA_FROM_DEVICE);
- skb = nskb;
} else {
- dma_unmap_single(&priv->pdev->dev, desc->address,
- priv->rx_skb_size, DMA_FROM_DEVICE);
- priv->rx_skb[desc_idx] = NULL;
+ dma_unmap_single(kdev, desc->address,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ priv->rx_buf[desc_idx] = NULL;
+
+ skb = build_skb(buf, priv->rx_frag_size);
+ if (unlikely(!skb)) {
+ skb_free_frag(buf);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, priv->rx_buf_offset);
}
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- netif_receive_skb(skb);
+ list_add_tail(&skb->list, &rx_list);
- } while (--budget > 0);
+ } while (processed < budget);
+
+ netif_receive_skb_list(&rx_list);
+ priv->rx_desc_count -= processed;
if (processed || !priv->rx_desc_count) {
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, true);
/* kick rx dma */
enet_dmac_writel(priv, priv->dma_chan_en_mask,
@@ -413,9 +426,11 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
struct bcm_enet_priv *priv;
+ unsigned int bytes;
int released;
priv = netdev_priv(dev);
+ bytes = 0;
released = 0;
while (priv->tx_desc_count < priv->tx_ring_size) {
@@ -452,10 +467,13 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
if (desc->len_stat & DMADESC_UNDER_MASK)
dev->stats.tx_errors++;
+ bytes += skb->len;
dev_kfree_skb(skb);
released++;
}
+ netdev_completed_queue(dev, released, bytes);
+
if (netif_queue_stopped(dev) && released)
netif_wake_queue(dev);
@@ -622,8 +640,11 @@ bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc->len_stat = len_stat;
wmb();
+ netdev_sent_queue(dev, skb->len);
+
/* kick tx dma */
- enet_dmac_writel(priv, priv->dma_chan_en_mask,
+ if (!netdev_xmit_more() || !priv->tx_desc_count)
+ enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->tx_chan);
/* stop queue if no more desc available */
@@ -845,6 +866,24 @@ static void bcm_enet_adjust_link(struct net_device *dev)
priv->pause_tx ? "tx" : "off");
}
+static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct bcm_enet_desc *desc;
+
+ if (!priv->rx_buf[i])
+ continue;
+
+ desc = &priv->rx_desc_cpu[i];
+ dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb_free_frag(priv->rx_buf[i]);
+ }
+ kfree(priv->rx_buf);
+}
+
/*
* open callback, allocate dma rings & buffers and start rx operation
*/
@@ -954,10 +993,10 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
+ if (!priv->rx_buf) {
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -974,8 +1013,8 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMAC_BUFALLOC, priv->rx_chan);
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -1069,18 +1108,7 @@ static int bcm_enet_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -1159,12 +1187,12 @@ static int bcm_enet_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
@@ -1186,21 +1214,10 @@ static int bcm_enet_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -1622,9 +1639,12 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
* align rx buffer size to dma burst len, account FCS since
* it's appended
*/
- priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+ priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
priv->dma_maxburst * 4);
+ priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
dev->mtu = new_mtu;
return 0;
}
@@ -1709,6 +1729,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->enet_is_sw = false;
priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD;
ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
@@ -2126,7 +2147,7 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ dev_err(kdev, "cannot allocate tx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -2136,11 +2157,11 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (!priv->rx_buf) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -2187,8 +2208,8 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMA_BUFALLOC_REG(priv->rx_chan));
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -2287,18 +2308,7 @@ static int bcm_enetsw_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -2327,13 +2337,13 @@ static int bcm_enetsw_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
del_timer_sync(&priv->swphy_poll);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
del_timer_sync(&priv->rx_timeout);
@@ -2348,21 +2358,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -2659,6 +2658,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
pd = dev_get_platdata(&pdev->dev);
if (pd) {
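Besides converting the rx buffers, the bcm63xx_enet.c hunks above wire the driver into byte queue limits (BQL) and batch the tx doorbell with netdev_xmit_more(). As a generic sketch of that pattern (kick_tx_dma() and ring_about_to_fill() are placeholders for the driver's enet_dmac_writel() kick and descriptor accounting):

/* ndo_start_xmit tail: account the bytes, kick DMA only when needed */
static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... descriptor setup elided ... */

	netdev_sent_queue(dev, skb->len);	/* BQL: bytes now in flight */

	/* Ring the doorbell only for the last skb of a batch, or when
	 * the ring just filled and no further xmit call may follow.
	 */
	if (!netdev_xmit_more() || ring_about_to_fill(dev))
		kick_tx_dma(dev);

	return NETDEV_TX_OK;
}

/* tx reclaim: report completed packets/bytes so BQL can size the queue */
	netdev_completed_queue(dev, released, bytes);

/* ndo_stop: clear BQL state once the rings are drained */
	netdev_reset_queue(dev);

The same hunks also replace per-packet netif_receive_skb() with netif_receive_skb_list(), handing the whole NAPI batch to the stack in one call.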
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 1d3c917eb830..78f1830fb3cb 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -230,11 +230,17 @@ struct bcm_enet_priv {
/* next dirty rx descriptor to refill */
int rx_dirty_desc;
- /* size of allocated rx skbs */
- unsigned int rx_skb_size;
+ /* size of allocated rx buffers */
+ unsigned int rx_buf_size;
- /* list of skb given to hw for rx */
- struct sk_buff **rx_skb;
+ /* allocated rx buffer offset */
+ unsigned int rx_buf_offset;
+
+ /* size of allocated rx frag */
+ unsigned int rx_frag_size;
+
+ /* list of buffers given to hw for rx */
+ void **rx_buf;
/* used when rx skb allocation failed, so we defer rx queue
* refill */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0fdd19d99d99..777bbf6d2586 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -2310,33 +2311,22 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_select_queue = bcm_sysport_select_queue,
};
-static int bcm_sysport_map_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_map_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
unsigned int q, qp, port;
- struct net_device *dev;
-
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
/* We can't be setting up queue inspection for switches that are not
* directly attached
*/
- if (info->switch_number)
+ if (dp->ds->index)
return 0;
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
* 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
@@ -2376,27 +2366,16 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_unmap_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_unmap_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
- struct net_device *dev;
unsigned int q, qp, port;
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
-
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
num_tx_queues = slave_dev->real_num_tx_queues;
@@ -2417,17 +2396,30 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bcm_sysport_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
- int ret = NOTIFY_DONE;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct bcm_sysport_priv *priv;
+ int ret = 0;
+
+ priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
+ if (priv->netdev != dev)
+ return NOTIFY_DONE;
switch (event) {
- case DSA_PORT_REGISTER:
- ret = bcm_sysport_map_queues(nb, ptr);
- break;
- case DSA_PORT_UNREGISTER:
- ret = bcm_sysport_unmap_queues(nb, ptr);
+ case NETDEV_CHANGEUPPER:
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return NOTIFY_DONE;
+
+ if (!dsa_slave_dev_check(info->upper_dev))
+ return NOTIFY_DONE;
+
+ if (info->linking)
+ ret = bcm_sysport_map_queues(dev, info->upper_dev);
+ else
+ ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
break;
}
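
The handler above is the generic replacement for the dropped DSA-specific notifier chain: the driver simply watches NETDEV_CHANGEUPPER events on its own master device and reacts when a DSA port is stacked on top of it. Stripped to the pattern itself (my_netdev_event() is a hypothetical name; the checks are the same ones used above):

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_CHANGEUPPER)
		return NOTIFY_DONE;
	if (!dsa_slave_dev_check(info->upper_dev))
		return NOTIFY_DONE;

	/* info->linking is true when the DSA port is being linked to
	 * dev, false when it is being unlinked */
	netdev_info(dev, "DSA port %s %slinked\n",
		    info->upper_dev->name, info->linking ? "" : "un");
	return NOTIFY_DONE;
}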
@@ -2503,8 +2495,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_free_netdev;
+ }
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -2577,6 +2571,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
@@ -2599,9 +2594,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rx_max_coalesced_frames = 1;
u64_stats_init(&priv->syncp);
- priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+ priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
- ret = register_dsa_notifier(&priv->dsa_notifier);
+ ret = register_netdevice_notifier(&priv->netdev_notifier);
if (ret) {
dev_err(&pdev->dev, "failed to register DSA notifier\n");
goto err_deregister_fixed_link;
@@ -2628,7 +2623,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
err_deregister_notifier:
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
@@ -2646,7 +2641,7 @@ static int bcm_sysport_remove(struct platform_device *pdev)
/* Not much to do, ndo_close has been called
* and we use managed allocations
*/
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
unregister_netdev(dev);
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 3a5cb6f128f5..984f76e74b43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/dim.h>
+#include "unimac.h"
+
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
#define DESC_ADDR_HI_SHIFT 0
@@ -213,39 +215,6 @@ struct bcm_rsb {
/* UniMAC offset and defines */
#define SYS_PORT_UMAC_OFFSET 0x800
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_10 0
-#define CMD_SPEED_100 1
-#define CMD_SPEED_1000 2
-#define CMD_SPEED_2500 3
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00c
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
@@ -787,7 +756,7 @@ struct bcm_sysport_priv {
struct u64_stats_sync syncp;
/* map information between switch port queues and local queues */
- struct notifier_block dsa_notifier;
+ struct notifier_block netdev_notifier;
unsigned int per_port_num_tx_queues;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 98ec1b8a7d8e..075f6e146b29 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -746,25 +746,25 @@ error:
/* TODO: can we just drop @force? Can we avoid resetting the MAC entirely
* if there is nothing to change? Try it after stabilizing the driver.
*/
*/
-static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
- bool force)
+static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
{
- u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
u32 new_val = (cmdcfg & mask) | set;
u32 cmdcfg_sr;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, cmdcfg_sr);
udelay(2);
if (new_val != cmdcfg || force)
- bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+ bgmac_umac_write(bgmac, UMAC_CMD, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~cmdcfg_sr, 0);
udelay(2);
}
@@ -773,9 +773,9 @@ static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
u32 tmp;
tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
- bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC0, tmp);
tmp = (addr[4] << 8) | addr[5];
- bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC1, tmp);
}
static void bgmac_set_rx_mode(struct net_device *net_dev)
@@ -783,9 +783,9 @@ static void bgmac_set_rx_mode(struct net_device *net_dev)
struct bgmac *bgmac = netdev_priv(net_dev);
if (net_dev->flags & IFF_PROMISC)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_PROMISC, true);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_PROMISC, 0, true);
}
#if 0 /* We don't use that regs yet */
@@ -825,21 +825,21 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
- u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 mask = ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
u32 set = 0;
switch (bgmac->mac_speed) {
case SPEED_10:
- set |= BGMAC_CMDCFG_ES_10;
+ set |= CMD_SPEED_10 << CMD_SPEED_SHIFT;
break;
case SPEED_100:
- set |= BGMAC_CMDCFG_ES_100;
+ set |= CMD_SPEED_100 << CMD_SPEED_SHIFT;
break;
case SPEED_1000:
- set |= BGMAC_CMDCFG_ES_1000;
+ set |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
break;
case SPEED_2500:
- set |= BGMAC_CMDCFG_ES_2500;
+ set |= CMD_SPEED_2500 << CMD_SPEED_SHIFT;
break;
default:
dev_err(bgmac->dev, "Unsupported speed: %d\n",
@@ -847,9 +847,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
}
if (bgmac->mac_duplex == DUPLEX_HALF)
- set |= BGMAC_CMDCFG_HD;
+ set |= CMD_HD_EN;
- bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+ bgmac_umac_cmd_maskset(bgmac, mask, set, true);
}
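
The generic unimac.h constants are field values rather than the pre-shifted BGMAC_CMDCFG_ES_* masks they replace, so every use shifts by CMD_SPEED_SHIFT (2). The register bits that result are unchanged; a quick check of the equivalence:

/* CMD_SPEED_* are 2-bit field values placed at bit 2 of UMAC_CMD:
 *   CMD_SPEED_100  << CMD_SPEED_SHIFT == 1 << 2 == 0x4 (was BGMAC_CMDCFG_ES_100)
 *   CMD_SPEED_1000 << CMD_SPEED_SHIFT == 2 << 2 == 0x8 (was BGMAC_CMDCFG_ES_1000)
 * and the whole field is cleared via the mask computed above:
 *   ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT) == ~0xc (was ~BGMAC_CMDCFG_ES_MASK)
 */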
static void bgmac_miiconfig(struct bgmac *bgmac)
@@ -917,7 +917,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
udelay(1);
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
@@ -986,34 +986,34 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
- * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
- * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
+ * The specs don't mention using UMAC_CMD_SR, but in this routine
+ * UMAC_CMD is read _after_ putting the chip in reset. So it has to
* be kept until taking the MAC out of reset.
*/
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
-
- bgmac_cmdcfg_maskset(bgmac,
- ~(BGMAC_CMDCFG_TE |
- BGMAC_CMDCFG_RE |
- BGMAC_CMDCFG_RPI |
- BGMAC_CMDCFG_TAI |
- BGMAC_CMDCFG_HD |
- BGMAC_CMDCFG_ML |
- BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_RL |
- BGMAC_CMDCFG_RED |
- BGMAC_CMDCFG_PE |
- BGMAC_CMDCFG_TPI |
- BGMAC_CMDCFG_PAD_EN |
- BGMAC_CMDCFG_PF),
- BGMAC_CMDCFG_PROM |
- BGMAC_CMDCFG_NLC |
- BGMAC_CMDCFG_CFE |
- cmdcfg_sr,
- false);
+ cmdcfg_sr = CMD_SW_RESET_OLD;
+
+ bgmac_umac_cmd_maskset(bgmac,
+ ~(CMD_TX_EN |
+ CMD_RX_EN |
+ CMD_RX_PAUSE_IGNORE |
+ CMD_TX_ADDR_INS |
+ CMD_HD_EN |
+ CMD_LCL_LOOP_EN |
+ CMD_CNTL_FRM_EN |
+ CMD_RMT_LOOP_EN |
+ CMD_RX_ERR_DISC |
+ CMD_PRBL_EN |
+ CMD_TX_PAUSE_IGNORE |
+ CMD_PAD_EN |
+ CMD_PAUSE_FWD),
+ CMD_PROMISC |
+ CMD_NO_LEN_CHK |
+ CMD_CNTL_FRM_EN |
+ cmdcfg_sr,
+ false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
@@ -1049,16 +1049,16 @@ static void bgmac_enable(struct bgmac *bgmac)
u32 mode;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
- bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- cmdcfg_sr, true);
+ cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
+ bgmac_umac_cmd_maskset(bgmac, ~(CMD_TX_EN | CMD_RX_EN),
+ cmdcfg_sr, true);
udelay(2);
- cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
- bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+ cmdcfg |= CMD_TX_EN | CMD_RX_EN;
+ bgmac_umac_write(bgmac, UMAC_CMD, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
@@ -1078,7 +1078,7 @@ static void bgmac_enable(struct bgmac *bgmac)
fl_ctl = 0x03cb04cb;
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
- bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ bgmac_umac_write(bgmac, UMAC_PAUSE_CTRL, 0x27fff);
}
if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
@@ -1105,18 +1105,18 @@ static void bgmac_chip_init(struct bgmac *bgmac)
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
/* Enable 802.3x tx flow control (honor received PAUSE frames) */
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_RX_PAUSE_IGNORE, 0, true);
bgmac_set_rx_mode(bgmac->net_dev);
bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
if (bgmac->loopback)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_LCL_LOOP_EN, 0, false);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + ETHER_MAX_LEN);
bgmac_chip_intrs_on(bgmac);
@@ -1252,7 +1252,7 @@ static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + mtu);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 351c598a3ec6..110088e662ea 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -4,6 +4,8 @@
#include <linux/netdevice.h>
+#include "unimac.h"
+
#define BGMAC_DEV_CTL 0x000
#define BGMAC_DC_TSM 0x00000002
#define BGMAC_DC_CFCO 0x00000004
@@ -169,47 +171,7 @@
#define BGMAC_RX_NONPAUSE_PKTS 0x420
#define BGMAC_RX_SACHANGES 0x424
#define BGMAC_RX_UNI_PKTS 0x428
-#define BGMAC_UNIMAC_VERSION 0x800
-#define BGMAC_HDBKP_CTL 0x804
-#define BGMAC_CMDCFG 0x808 /* Configuration */
-#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
-#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
-#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
-#define BGMAC_CMDCFG_ES_10 0x00000000
-#define BGMAC_CMDCFG_ES_100 0x00000004
-#define BGMAC_CMDCFG_ES_1000 0x00000008
-#define BGMAC_CMDCFG_ES_2500 0x0000000C
-#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
-#define BGMAC_CMDCFG_PAD_EN 0x00000020
-#define BGMAC_CMDCFG_CF 0x00000040
-#define BGMAC_CMDCFG_PF 0x00000080
-#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
-#define BGMAC_CMDCFG_TAI 0x00000200
-#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
-#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
-#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
-#define BGMAC_CMDCFG_AE 0x00400000
-#define BGMAC_CMDCFG_CFE 0x00800000
-#define BGMAC_CMDCFG_NLC 0x01000000
-#define BGMAC_CMDCFG_RL 0x02000000
-#define BGMAC_CMDCFG_RED 0x04000000
-#define BGMAC_CMDCFG_PE 0x08000000
-#define BGMAC_CMDCFG_TPI 0x10000000
-#define BGMAC_CMDCFG_AT 0x20000000
-#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
-#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
-#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
-#define BGMAC_PAUSEQUANTA 0x818
-#define BGMAC_MAC_MODE 0x844
-#define BGMAC_OUTERTAG 0x848
-#define BGMAC_INNERTAG 0x84c
-#define BGMAC_TXIPG 0x85c
-#define BGMAC_PAUSE_CTL 0xb30
-#define BGMAC_TX_FLUSH 0xb34
-#define BGMAC_RX_STATUS 0xb38
-#define BGMAC_TX_STATUS 0xb3c
+#define BGMAC_UNIMAC 0x800
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
@@ -556,6 +518,16 @@ static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
bgmac->write(bgmac, offset, value);
}
+static inline u32 bgmac_umac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bgmac_read(bgmac, BGMAC_UNIMAC + offset);
+}
+
+static inline void bgmac_umac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bgmac_write(bgmac, BGMAC_UNIMAC + offset, value);
+}
+
static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset)
{
return bgmac->idm_read(bgmac, offset);
@@ -609,6 +581,11 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
bgmac_maskset(bgmac, offset, ~0, set);
}
+static inline void bgmac_umac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, u32 set)
+{
+ bgmac_maskset(bgmac, BGMAC_UNIMAC + offset, mask, set);
+}
+
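
These accessors make the UniMAC block's base explicit: every generic UMAC_* offset from unimac.h is applied relative to BGMAC_UNIMAC (0x800), which is exactly where the removed BGMAC_CMDCFG/BGMAC_MACADDR_* registers lived. The maskset helper follows the driver's existing read-modify-write convention:

/* Equivalences (sketch):
 *   bgmac_umac_read(bgmac, UMAC_CMD) == bgmac_read(bgmac, 0x800 + 0x008),
 *     i.e. the old BGMAC_CMDCFG register.
 *   bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, b) sets bit(s) b;
 *   bgmac_umac_maskset(bgmac, UMAC_CMD, ~b, 0) clears bit(s) b,
 *   since maskset computes new = (old & mask) | set.
 */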
static inline int bgmac_phy_connect(struct bgmac *bgmac)
{
return bgmac->phy_connect(bgmac);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 28069b290862..b652ed72a621 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13071,8 +13071,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4edd6f8e017e..f508c5c61a30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -255,6 +255,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
};
@@ -1265,8 +1266,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
} else {
tpa_info->hash_type = PKT_HASH_TYPE_NONE;
tpa_info->gso_type = 0;
- if (netif_msg_rx_err(bp))
- netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
@@ -2021,10 +2021,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
- case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
- if (netif_msg_hw(bp))
- netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
- data1, data2);
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ char *fatal_str = "non-fatal";
+
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2036,14 +2035,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ fatal_str = "fatal";
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
- } else {
- netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
- bp->fw_reset_max_dsecs * 100);
}
+ netif_warn(bp, hw, bp->dev,
+ "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ fatal_str, data1, data2,
+ bp->fw_reset_min_dsecs * 100,
+ bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -2055,13 +2057,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!fw_health->enabled)
break;
- if (netif_msg_drv(bp))
- netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
- fw_health->enabled, fw_health->master,
- bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_CNT_REG),
- bnxt_fw_health_readl(bp,
- BNXT_FW_HEALTH_REG));
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
+ fw_health->enabled, fw_health->master,
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG),
+ bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2072,6 +2072,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ goto async_event_process_exit;
case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
@@ -2394,6 +2399,10 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
while (1) {
work_done += bnxt_poll_work(bp, cpr, budget - work_done);
@@ -2468,6 +2477,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
int work_done = 0;
u32 cons;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
@@ -4272,6 +4285,9 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
+ if (!bp->irq_tbl)
+ return;
+
atomic_inc(&bp->intr_sem);
bnxt_disable_int(bp);
@@ -4425,6 +4441,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
+ /* Limit timeout to an upper limit */
+ timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
@@ -6790,8 +6808,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6843,6 +6863,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input req = {0};
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ u32 req_len = sizeof(req);
__le32 *num_entries;
__le64 *pg_dir;
u32 flags = 0;
@@ -6853,6 +6874,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (!ctx)
return 0;
+ if (req_len > bp->hwrm_max_ext_req_len)
+ req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
req.enables = cpu_to_le32(enables);
@@ -6925,7 +6948,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -6935,7 +6959,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
}
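
The request structure grew from 256 to 304 bytes to carry the three extra TQM rings, but firmware that predates extended requests still expects the legacy layout; hence the req_len clamp above. In effect (constants from the bnxt.h hunks later in this diff):

/* sizeof(struct hwrm_func_backing_store_cfg_input) is now 304 bytes.
 * Firmware that cannot accept extended requests gets only the first
 * BNXT_BACKING_STORE_CFG_LEGACY_LEN (256) bytes, which match the old
 * layout, so the new ring8..ring10 fields are simply not sent.
 */
u32 req_len = sizeof(req);
if (req_len > bp->hwrm_max_ext_req_len)
	req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;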
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -7435,9 +7459,22 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
- if (bp->fw_health)
- bp->fw_health->status_reliable = false;
- return;
+ if (!bp->chip_num) {
+ __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
+ bp->chip_num = readl(bp->bar0 +
+ BNXT_FW_HEALTH_WIN_BASE +
+ BNXT_GRC_REG_CHIP_NUM);
+ }
+ if (!BNXT_CHIP_P5(bp)) {
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+ return;
+ }
+ status_loc = BNXT_GRC_REG_STATUS_P5 |
+ BNXT_FW_HEALTH_REG_TYPE_BAR0;
+ } else {
+ status_loc = readl(hs + offsetof(struct hcomm_status,
+ fw_status_loc));
}
if (__bnxt_alloc_fw_health(bp)) {
@@ -7445,7 +7482,6 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
return;
}
- status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
@@ -8600,7 +8636,7 @@ msix_setup_exit:
static int bnxt_init_inta(struct bnxt *bp)
{
- bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
if (!bp->irq_tbl)
return -ENOMEM;
@@ -8808,7 +8844,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
{
int i;
- if (!bp->bnapi)
+ if (!bp->bnapi ||
+ test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8825,6 +8862,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
{
int i;
+ clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
@@ -9331,13 +9369,60 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
static int bnxt_fw_init_one(struct bnxt *bp);
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
+{
+#ifdef CONFIG_TEE_BNXT_FW
+ int rc = tee_bnxt_fw_load();
+
+ if (rc)
+ netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
+
+ return rc;
+#else
+ netdev_err(bp->dev, "OP-TEE not supported\n");
+ return -ENODEV;
+#endif
+}
+
+static int bnxt_try_recover_fw(struct bnxt *bp)
+{
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ int retry = 0, rc;
+ u32 sts;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ do {
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!sts || !BNXT_FW_IS_BOOTING(sts))
+ break;
+ retry++;
+ } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (!BNXT_FW_IS_HEALTHY(sts)) {
+ netdev_err(bp->dev,
+ "Firmware not responding, status: 0x%x\n",
+ sts);
+ rc = -ENODEV;
+ }
+ if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+ netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
+ return bnxt_fw_reset_via_optee(bp);
+ }
+ return rc;
+ }
+
+ return -ENODEV;
+}
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_if_change_input req = {0};
bool resc_reinit = false, fw_reset = false;
+ int rc, retry = 0;
u32 flags = 0;
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -9346,10 +9431,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (up)
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ while (retry < BNXT_FW_IF_RETRY) {
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc != -EAGAIN)
+ break;
+
+ msleep(50);
+ retry++;
+ }
if (!rc)
flags = le32_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc == -EAGAIN)
+ return rc;
+ if (rc && up) {
+ rc = bnxt_try_recover_fw(bp);
+ fw_reset = true;
+ }
if (rc)
return rc;
@@ -9689,6 +9789,25 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
+static int bnxt_reinit_after_abort(struct bnxt *bp)
+{
+ int rc;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EBUSY;
+
+ rc = bnxt_fw_init_one(bp);
+ if (!rc) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (!rc) {
+ clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ }
+ return rc;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -9847,8 +9966,14 @@ static int bnxt_open(struct net_device *dev)
int rc;
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
- netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
- return -ENODEV;
+ rc = bnxt_reinit_after_abort(bp);
+ if (rc) {
+ if (rc == -EBUSY)
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
+ else
+ netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
+ return -ENODEV;
+ }
}
rc = bnxt_hwrm_if_change(bp, true);
@@ -10785,11 +10910,18 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- /* When firmware is fatal state, disable PCI device to prevent
- * any potential bad DMAs before freeing kernel memory.
+ /* When firmware is in fatal state, quiesce device and disable
+ * bus master to prevent any potential bad DMAs before freeing
+ * kernel memory.
*/
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
+ }
__bnxt_close_nic(bp, true, false);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
@@ -11177,21 +11309,6 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
-static int bnxt_fw_reset_via_optee(struct bnxt *bp)
-{
-#ifdef CONFIG_TEE_BNXT_FW
- int rc = tee_bnxt_fw_load();
-
- if (rc)
- netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
-
- return rc;
-#else
- netdev_err(bp->dev, "OP-TEE not supported\n");
- return -ENODEV;
-#endif
-}
-
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -11200,19 +11317,10 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
rc = bnxt_hwrm_ver_get(bp);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
-
- netdev_err(bp->dev,
- "Firmware not responding, status: 0x%x\n",
- sts);
- if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
- netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
- rc = bnxt_fw_reset_via_optee(bp);
- if (!rc)
- rc = bnxt_hwrm_ver_get(bp);
- }
- }
+ rc = bnxt_try_recover_fw(bp);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_ver_get(bp);
if (rc)
return rc;
}
@@ -11412,6 +11520,12 @@ static void bnxt_reset_all(struct bnxt *bp)
bp->fw_reset_timestamp = jiffies;
}
+static bool bnxt_fw_reset_timeout(struct bnxt *bp)
+{
+ return time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10));
+}
+
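
fw_reset_max_dsecs counts deciseconds (100 ms units), so the deadline is fw_reset_timestamp plus dsecs * HZ / 10 jiffies; the helper replaces three open-coded copies of that test in bnxt_fw_reset_task() below. The general form (deadline_passed() is a hypothetical standalone rendering):

static bool deadline_passed(unsigned long start_jiffies, u16 dsecs)
{
	/* dsecs are 100 ms ticks; HZ jiffies per second => HZ / 10 per tick.
	 * e.g. dsecs == 60 -> 6 seconds -> 6 * HZ jiffies after start. */
	return time_after(jiffies,
			  start_jiffies + (unsigned long)dsecs * HZ / 10);
}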
static void bnxt_fw_reset_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
@@ -11433,8 +11547,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_timestamp));
goto fw_reset_abort;
} else if (n > 0) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
@@ -11463,8 +11576,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
- !time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ !bnxt_fw_reset_timeout(bp)) {
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
}
@@ -11506,8 +11618,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
rc = __bnxt_hwrm_ver_get(bp, true);
if (rc) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted\n");
goto fw_reset_abort_status;
}
@@ -12088,8 +12199,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
@@ -12541,9 +12650,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
- if (BNXT_PF(bp))
- bnxt_vpd_read_info(bp);
-
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
goto init_err_pci_clean;
@@ -12555,6 +12661,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);
+
if (BNXT_CHIP_P5(bp)) {
bp->flags |= BNXT_FLAG_CHIP_P5;
if (BNXT_CHIP_SR2(bp))
@@ -12887,10 +12996,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +13028,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
-
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
}
rtnl_unlock();
@@ -12952,10 +13047,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 950ea26ae0d2..4ef6888acdc6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -656,6 +656,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
+#define HWRM_CMD_MAX_TIMEOUT 40000
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -1345,9 +1346,14 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
+#define BNXT_GRC_REG_STATUS_P5 0x520
+
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_REG_CHIP_NUM 0x48
+#define BNXT_GRC_REG_BASE 0x260000
+
#define BNXT_GRC_BASE_MASK 0xfffff000
#define BNXT_GRC_OFFSET_MASK 0x00000ffc
@@ -1436,6 +1442,13 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
+#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1474,7 +1487,7 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
};
struct bnxt_fw_health {
@@ -1527,9 +1540,22 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
((reg) & BNXT_GRC_OFFSET_MASK))
+#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_BOOTING(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_RETRY 5
+#define BNXT_FW_IF_RETRY 10
+
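
The low 16 bits of the firmware status word form a health code centered on BNXT_FW_STATUS_HEALTHY (0x8000): smaller values mean the firmware is still booting, larger values mean it hit an error, which is what the three comparison macros encode. A sketch of the resulting three-way decode, as bnxt_try_recover_fw() and the devlink health reporter use it:

u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

if (BNXT_FW_IS_HEALTHY(sts)) {		/* (sts & 0xffff) == 0x8000 */
	/* fully initialized */
} else if (BNXT_FW_IS_BOOTING(sts)) {	/* (sts & 0xffff) <  0x8000 */
	/* still coming up: retry/poll */
} else {				/* BNXT_FW_IS_ERR: > 0x8000 */
	/* fatal error: attempt recovery */
}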
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1783,6 +1809,7 @@ struct bnxt {
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
+#define BNXT_STATE_NAPI_DISABLED 9
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 6b7b69ed62db..90a31b4a3020 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -44,21 +44,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val, health_status;
+ u32 val;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- health_status = val & 0xffff;
- if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ if (BNXT_FW_IS_BOOTING(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Not yet completed initialization");
if (rc)
return rc;
- } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+ } else if (BNXT_FW_IS_ERR(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Encountered fatal error and cannot recover");
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ff79d5d14c4..2f8b193a772d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2532,7 +2532,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
- install.flags |=
+ install.flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = _hwrm_send_message_silent(bp, &install,
@@ -2546,6 +2546,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
* UPDATE directory and try the flash again
*/
defrag_attempted = true;
+ install.flags = 0;
rc = __bnxt_flash_nvram(bp->dev,
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 2d3e962bdac3..d5c6e6a3d22d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2020 Broadcom Inc.
+ * Copyright (c) 2018-2021 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -164,6 +164,7 @@ struct cmd_nums {
#define HWRM_VNIC_PLCMODES_CFG 0x48UL
#define HWRM_VNIC_PLCMODES_QCFG 0x49UL
#define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
#define HWRM_RING_ALLOC 0x50UL
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
@@ -184,6 +185,9 @@ struct cmd_nums {
#define HWRM_QUEUE_MPLS_QCAPS 0x80UL
#define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
#define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -217,6 +221,8 @@ struct cmd_nums {
#define HWRM_PORT_TX_FIR_CFG 0xbbUL
#define HWRM_PORT_TX_FIR_QCFG 0xbcUL
#define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -347,6 +353,8 @@ struct cmd_nums {
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -359,6 +367,11 @@ struct cmd_nums {
#define HWRM_MFG_HDMA_TEST 0x209UL
#define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
#define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
+ #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
+ #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -384,6 +397,7 @@ struct cmd_nums {
#define HWRM_TF_EXT_EM_QCFG 0x2e9UL
#define HWRM_TF_EM_INSERT 0x2eaUL
#define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
#define HWRM_TF_TCAM_SET 0x2f8UL
#define HWRM_TF_TCAM_GET 0x2f9UL
#define HWRM_TF_TCAM_MOVE 0x2faUL
@@ -486,9 +500,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 68
-#define HWRM_VERSION_STR "1.10.1.68"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 11
+#define HWRM_VERSION_STR "1.10.2.11"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -563,8 +577,9 @@ struct hwrm_ver_get_output {
__le16 max_resp_len;
__le16 def_req_timeout;
u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
u8 unused_0[2];
u8 always_1;
__le16 hwrm_intf_major;
@@ -708,6 +723,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x42UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -815,6 +831,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
u8 opaque_v;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
@@ -832,7 +850,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
@@ -1271,6 +1290,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1315,6 +1338,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1731,6 +1755,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1993,7 +2018,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
+/* hwrm_func_backing_store_qcaps_output (size:704b/88B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2024,13 +2049,25 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
- __le32 rsvd;
- __le16 rsvd1;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 rsvd[6];
u8 valid;
};
-/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+/* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
struct hwrm_func_backing_store_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2041,22 +2078,25 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -2358,6 +2398,63 @@ struct hwrm_func_backing_store_cfg_input {
__le16 tqm_entry_size;
__le16 mrav_entry_size;
__le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -2930,6 +3027,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -3528,8 +3626,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
#define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xc0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 6
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1 0x80UL
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -4119,7 +4217,10 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 unused_0;
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
char qid0_name[16];
char qid1_name[16];
char qid2_name[16];
@@ -4128,7 +4229,34 @@ struct hwrm_queue_qportcfg_output {
char qid5_name[16];
char qid6_name[16];
char qid7_name[16];
- u8 unused_1[7];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
u8 valid;
};
@@ -5142,8 +5270,10 @@ struct hwrm_vnic_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- u8 unused_0[4];
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ __le16 virtio_net_fid;
+ u8 unused_0[2];
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -5260,6 +5390,8 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
#define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5585,7 +5717,11 @@ struct hwrm_ring_alloc_output {
__le16 resp_len;
__le16 ring_id;
__le16 logical_ring_id;
- u8 unused_0[3];
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
u8 valid;
};
@@ -5644,7 +5780,11 @@ struct hwrm_ring_reset_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[4];
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
u8 consumer_idx[3];
u8 valid;
};
@@ -6988,21 +7128,23 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
u8 unused_0[3];
u8 valid;
};
@@ -7472,7 +7614,8 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
__le16 len;
u8 version;
u8 count;
@@ -8000,6 +8143,9 @@ struct hwrm_dbg_coredump_initiate_output {
struct coredump_data_hdr {
__le32 address;
__le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
__le32 instance;
__le32 next_offset;
};
@@ -8669,7 +8815,6 @@ struct hcomm_status {
#define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
#define HCOMM_STATUS_TRUE_OFFSET_SFT 2
};
-
#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
#endif /* _BNXT_HSI_H_ */
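The new backing-store and queue-profile fields above follow the HSI convention of packing a ring level and a page size into one byte behind MASK/SFT pairs. A minimal decoding sketch, using only the RING10 constants defined above (the helper names are invented for illustration):

    static inline u8 tqm_ring10_lvl(u8 v)
    {
            return (v & FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK) >>
                   FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT;
    }

    static inline u8 tqm_ring10_pg_size(u8 v)
    {
            /* yields e.g. 0x3 for PG_2M given the encodings above */
            return (v & FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK) >>
                   FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT;
    }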
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 8c8368c2f335..64dbbb04b043 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- return BNXT_MIN_ROCE_STAT_CTXS;
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return BNXT_MIN_ROCE_STAT_CTXS;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index fcc262064766..641303894341 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -133,12 +133,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
txr = rxr->bnapi->tx_ring;
- xdp.data_hard_start = *data_ptr - offset;
- xdp.data = *data_ptr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = *data_ptr + *len;
- xdp.rxq = &rxr->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
+ xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
rcu_read_lock();
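This is one of several conversions in this series from open-coded xdp_buff setup to the xdp_init_buff()/xdp_prepare_buff() helpers (nicvf, dpaa and dpaa2 below follow the same pattern). A minimal sketch of the pattern, with everything except the two helpers treated as a placeholder:

    #include <net/xdp.h>

    static void example_fill_xdp(struct xdp_rxq_info *rxq,
                                 unsigned char *hard_start,
                                 unsigned int headroom, unsigned int len,
                                 struct xdp_buff *xdp)
    {
            /* frame_sz and rxq are fixed per RX queue */
            xdp_init_buff(xdp, PAGE_SIZE, rxq);
            /* data = hard_start + headroom; 'false' marks data_meta invalid */
            xdp_prepare_buff(xdp, hard_start, headroom, len, false);
    }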
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index f6ca01da141d..0a6d91b0f0aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -16,6 +16,8 @@
#include <linux/dim.h>
#include <linux/ethtool.h>
+#include "../unimac.h"
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -150,63 +152,6 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
-#define UMAC_HD_BKP_CTRL 0x004
-#define HD_FC_EN (1 << 0)
-#define HD_FC_BKOFF_OK (1 << 1)
-#define IPG_CONFIG_RX_SHIFT 2
-#define IPG_CONFIG_RX_MASK 0x1F
-
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define UMAC_SPEED_10 0
-#define UMAC_SPEED_100 1
-#define UMAC_SPEED_1000 2
-#define UMAC_SPEED_2500 3
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00C
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_MODE 0x44
-#define MODE_LINK_STATUS (1 << 5)
-
-#define UMAC_EEE_CTRL 0x064
-#define EN_LPI_RX_PAUSE (1 << 0)
-#define EN_LPI_TX_PFC (1 << 1)
-#define EN_LPI_TX_PAUSE (1 << 2)
-#define EEE_EN (1 << 3)
-#define RX_FIFO_CHECK (1 << 4)
-#define EEE_TX_CLK_DIS (1 << 5)
-#define DIS_EEE_10M (1 << 6)
-#define LP_IDLE_PREDICTION_MODE (1 << 7)
-
-#define UMAC_EEE_LPI_TIMER 0x068
-#define UMAC_EEE_WAKE_TIMER 0x06C
-#define UMAC_EEE_REF_COUNT 0x070
-#define EEE_REFERENCE_COUNT_MASK 0xffff
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6fb6c3556285..17f997ef950f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -63,11 +63,11 @@ void bcmgenet_mii_setup(struct net_device *dev)
/* speed */
if (phydev->speed == SPEED_1000)
- cmd_bits = UMAC_SPEED_1000;
+ cmd_bits = CMD_SPEED_1000;
else if (phydev->speed == SPEED_100)
- cmd_bits = UMAC_SPEED_100;
+ cmd_bits = CMD_SPEED_100;
else
- cmd_bits = UMAC_SPEED_10;
+ cmd_bits = CMD_SPEED_10;
cmd_bits <<= CMD_SPEED_SHIFT;
/* duplex */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5143cdd0eeca..8936c2bc6286 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12826,11 +12826,13 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = tg3_nvram_logical_addr(tp, offset);
}
- }
- if (!offset || !len) {
- offset = TG3_NVM_VPD_OFF;
- len = TG3_NVM_VPD_LEN;
+ if (!offset || !len) {
+ offset = TG3_NVM_VPD_OFF;
+ len = TG3_NVM_VPD_LEN;
+ }
+ } else {
+ len = TG3_NVM_PCI_VPD_MAX_LEN;
}
buf = kmalloc(len, GFP_KERNEL);
@@ -12846,26 +12848,16 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
goto error;
}
+ *vpdlen = len;
} else {
- u8 *ptr;
ssize_t cnt;
- unsigned int pos = 0;
-
- ptr = (u8 *)&buf[0];
- for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
- cnt = pci_read_vpd(tp->pdev, pos,
- len - pos, ptr);
- if (cnt == -ETIMEDOUT || cnt == -EINTR)
- cnt = 0;
- else if (cnt < 0)
- goto error;
- }
- if (pos != len)
+
+ cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
+ if (cnt < 0)
goto error;
+ *vpdlen = cnt;
}
- *vpdlen = len;
-
return buf;
error:
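The rewritten else-branch relies on pci_read_vpd() doing the whole transfer itself: it returns the number of bytes actually read or a negative errno, so the caller-side chunking loop and the -ETIMEDOUT/-EINTR special cases can go. A sketch of the resulting call shape (function name and sizing are illustrative):

    #include <linux/pci.h>

    static ssize_t example_read_vpd(struct pci_dev *pdev, void *buf, size_t len)
    {
            /* single call; returns bytes read (may be < len) or -errno */
            return pci_read_vpd(pdev, 0, len, buf);
    }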
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1000c894064f..46ec4fdfd16a 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,6 +2101,7 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
+#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/broadcom/unimac.h b/drivers/net/ethernet/broadcom/unimac.h
new file mode 100644
index 000000000000..585a85286257
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/unimac.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __UNIMAC_H
+#define __UNIMAC_H
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET_OLD (1 << 11)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_RX_ERR_DISC (1 << 26)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_PAUSE_QUANTA 0x018
+#define UMAC_MODE 0x044
+#define MODE_LINK_STATUS (1 << 5)
+#define UMAC_FRM_TAG0 0x048 /* outer tag */
+#define UMAC_FRM_TAG1 0x04c /* inner tag */
+#define UMAC_TX_IPG_LEN 0x05c
+#define UMAC_EEE_CTRL 0x064
+#define EN_LPI_RX_PAUSE (1 << 0)
+#define EN_LPI_TX_PFC (1 << 1)
+#define EN_LPI_TX_PAUSE (1 << 2)
+#define EEE_EN (1 << 3)
+#define RX_FIFO_CHECK (1 << 4)
+#define EEE_TX_CLK_DIS (1 << 5)
+#define DIS_EEE_10M (1 << 6)
+#define LP_IDLE_PREDICTION_MODE (1 << 7)
+#define UMAC_EEE_LPI_TIMER 0x068
+#define UMAC_EEE_WAKE_TIMER 0x06C
+#define UMAC_EEE_REF_COUNT 0x070
+#define EEE_REFERENCE_COUNT_MASK 0xffff
+#define UMAC_RX_IPG_INV 0x078
+#define UMAC_MACSEC_PROG_TX_CRC 0x310
+#define UMAC_MACSEC_CTRL 0x314
+#define UMAC_PAUSE_CTRL 0x330
+#define UMAC_TX_FLUSH 0x334
+#define UMAC_RX_FIFO_STATUS 0x338
+#define UMAC_TX_FIFO_STATUS 0x33c
+
+#endif
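Moving these register definitions into a shared unimac.h lets bcmgenet and other UniMAC-based drivers (the renamed CMD_SPEED_* macros in bcmmii.c below are the first user) program the same block without duplicating constants. A hypothetical consumer, assuming only the macros defined above:

    #include "unimac.h"

    static u32 example_umac_set_speed(u32 cmd, u32 speed_sel)
    {
            /* speed_sel is one of CMD_SPEED_10/100/1000/2500 */
            cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
            cmd |= speed_sel << CMD_SPEED_SHIFT;
            return cmd;
    }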
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5d910916c2e..472bf8f220bc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -467,7 +467,11 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+ if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
+ return;
+
+ /* In case of MII the PHY is the clock master */
+ if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
switch (speed) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 37d064193f0f..2a0d64e5797c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1163,7 +1163,7 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
oct->flags |= LIO_FLAG_MSI_ENABLED;
/* allocate storage for the names assigned to the irq */
- oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+ oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
if (!oct->irq_name_storage)
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7d00d3a8ded4..7c5af4beedc6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3219,8 +3219,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_set_vf_mac = liquidio_set_vf_mac,
.ndo_set_vf_vlan = liquidio_set_vf_vlan,
.ndo_get_vf_config = liquidio_get_vf_config,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 103440f97bc8..516f166ceff8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1879,8 +1879,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 387a57cbfb73..e159194d0aef 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -545,7 +545,7 @@ static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
-static spinlock_t octeon_devices_lock;
+static DEFINE_SPINLOCK(octeon_devices_lock);
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
@@ -563,7 +563,6 @@ void octeon_init_device_list(int conf_type)
memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
- spin_lock_init(&octeon_devices_lock);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
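DEFINE_SPINLOCK() initializes the lock at compile time, which is why the spin_lock_init() call in octeon_init_device_list() can be dropped. The general shape, as a standalone sketch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);   /* usable immediately, no init call */

    static void example_critical(void)
    {
            spin_lock(&example_lock);
            /* ... protected section ... */
            spin_unlock(&example_lock);
    }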
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 5e50bb19bf26..ecffebd513be 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1556,18 +1556,7 @@ static struct platform_driver octeon_mgmt_driver = {
.remove = octeon_mgmt_remove,
};
-static int __init octeon_mgmt_mod_init(void)
-{
- return platform_driver_register(&octeon_mgmt_driver);
-}
-
-static void __exit octeon_mgmt_mod_exit(void)
-{
- platform_driver_unregister(&octeon_mgmt_driver);
-}
-
-module_init(octeon_mgmt_mod_init);
-module_exit(octeon_mgmt_mod_exit);
+module_platform_driver(octeon_mgmt_driver);
MODULE_SOFTDEP("pre: mdio-cavium");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
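module_platform_driver() expands to exactly the init/exit boilerplate deleted above. A minimal sketch of a driver using it (all names invented):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver example_driver = {
            .probe  = example_probe,
            .driver = { .name = "example" },
    };

    /* generates module_init()/module_exit() calling
     * platform_driver_register()/platform_driver_unregister()
     */
    module_platform_driver(example_driver);
    MODULE_LICENSE("GPL");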
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f3b7b443f964..c33b4e837515 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -530,6 +530,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
struct rcv_queue *rq, struct sk_buff **skb)
{
+ unsigned char *hard_start, *data;
struct xdp_buff xdp;
struct page *page;
u32 action;
@@ -547,12 +548,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
- xdp.data = (void *)cpu_addr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + len;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ &rq->xdp_rxq);
+ hard_start = page_address(page);
+ data = (unsigned char *)cpu_addr;
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7fd264a6d085..9f1965c80fb1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3882,8 +3882,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
@@ -5139,7 +5137,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* See if FW supports FW_FILTER2 work request */
if (is_t4(adap->params.chip)) {
- adap->params.filter2_wr_support = 0;
+ adap->params.filter2_wr_support = false;
} else {
params[0] = FW_PARAM_DEV(FILTER2_WR);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 196652a114c5..550cc065649f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1600,7 +1600,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1832,6 +1833,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct adapter *adapter;
int qidx, credits, ret;
size_t fw_hdr_copy_len;
+ unsigned int chip_ver;
u64 cntrl, *end;
u32 wr_mid;
@@ -1896,6 +1898,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
goto out_free;
}
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/* After we're done injecting the Work Request for this
@@ -1907,7 +1910,8 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
@@ -1960,7 +1964,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/
cpl = (void *)(lso + 1);
- if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ if (chip_ver <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
@@ -3598,6 +3602,25 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
}
txq = &s->ethtxq[pi->first_qset + rspq->idx];
+
+ /* We've got the Hardware Consumer Index Update in the Egress Update
+ * message. These Egress Update messages will be our sole CIDX Updates
+ * we get since we don't want to chew up PCIe bandwidth for both Ingress
+ * Messages and Status Page writes. However, the code which manages
+ * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
+ * stored in the Status Page at the end of the TX Queue. It's easiest
+ * to simply copy the CIDX Update value from the Egress Update message
+ * to the Status Page. Also note that no Endian issues need to be
+ * considered here since both are Big Endian and we're just copying
+ * bytes consistently ...
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ struct cpl_sge_egr_update *egr;
+
+ egr = (struct cpl_sge_egr_update *)rsp;
+ WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
+ }
+
t4_sge_eth_txq_egress_update(adapter, txq, -1);
}
@@ -4583,11 +4606,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
* write the CIDX Updates into the Status Page at the end of the
* TX Queue.
*/
- c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
+ FW_EQ_ETH_CMD_AUTOEQUIQE_F :
+ FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
+ HOSTFCMODE_INGRESS_QUEUE_X :
+ HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
@@ -4598,6 +4625,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
: FETCHBURSTMIN_64B_T6_X) |
FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 98d01a7497ec..98829e482bfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2689,7 +2689,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
-#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
* t4_eeprom_ptov - translate a physical EEPROM address to virtual
@@ -2745,7 +2744,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
int i, ret = 0, addr;
int ec, sn, pn, na;
- u8 *vpd, csum;
+ u8 *vpd, csum, base_val = 0;
unsigned int vpdr_len, kw_offset, id_len;
vpd = vmalloc(VPD_LEN);
@@ -2755,17 +2754,11 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
if (ret < 0)
goto out;
- /* The VPD shall have a unique identifier specified by the PCI SIG.
- * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
- * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
- * is expected to automatically put this entry at the
- * beginning of the VPD.
- */
- addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
+ addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
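The probe now reads a single byte: a spec-conforming VPD begins with the ID-string large-resource tag, and PCI_VPD_LRDT_ID_STRING is that 0x82 value from <linux/pci.h>, which is why the private CHELSIO_VPD_UNIQUE_ID define could be removed. The detection logic, isolated as a sketch (the two base offsets stand in for VPD_BASE/VPD_BASE_OLD):

    #include <linux/pci.h>

    static int example_vpd_base(struct pci_dev *pdev, int base, int base_old)
    {
            u8 first = 0;
            ssize_t ret = pci_read_vpd(pdev, base, 1, &first);

            if (ret < 0)
                    return ret;
            return first == PCI_VPD_LRDT_ID_STRING ? base : base_old;
    }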
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 92473dda55d9..22a0220123ad 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -40,6 +40,13 @@
#define TCB_L2T_IX_M 0xfffULL
#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+#define TCB_T_FLAGS_W 1
+#define TCB_T_FLAGS_S 0
+#define TCB_T_FLAGS_M 0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_FIELD_COOKIE_TFLAG 1
+
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 47d9268a7e3c..585590520076 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -92,9 +92,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
static struct cxgb4_uld_info ch_ipsec_uld_info = {
.name = CHIPSEC_DRV_MODULE_NAME,
- .nrxq = MAX_ULD_QSETS,
- /* Max ntxq will be derived from fw config file*/
- .rxq_size = 1024,
.add = ch_ipsec_uld_add,
.state_change = ch_ipsec_uld_state_change,
.tx_handler = ch_ipsec_xmit,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 72bb123d53db..9e2378013642 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -575,7 +575,11 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t);
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
+void chtls_set_quiesce_ctrl(struct sock *sk, int val);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index a0e0d8a83681..19dc7dc054a2 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -32,6 +32,7 @@
#include "chtls.h"
#include "chtls_cm.h"
#include "clip_tbl.h"
+#include "t4_tcb.h"
/*
* State transitions and actions for close. Note that if we are in SYN_SENT
@@ -267,7 +268,9 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
if (sk->sk_state != TCP_SYN_RECV)
chtls_send_abort(sk, mode, skb);
else
- goto out;
+ chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
+ TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
+ TCB_FIELD_COOKIE_TFLAG, 1);
return;
out:
@@ -621,7 +624,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
- container_of((struct synq *)__skb_dequeue
+ container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@@ -1109,6 +1112,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
@@ -1118,9 +1122,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct dst_entry *dst;
struct tcp_sock *tp;
struct sock *newsk;
+ bool found = false;
u16 port_id;
int rxq_idx;
- int step;
+ int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,15 +1157,20 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
}
- if (!n)
- goto free_sk;
+ if (!n || !n->dev)
+ goto free_dst;
ndev = n->dev;
- if (!ndev)
- goto free_dst;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ found = true;
+
+ if (!found)
+ goto free_dst;
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1238,6 +1248,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
+ if (n)
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1399,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto free_oreq;
+ goto reject;
if (chtls_get_module(newsk))
goto reject;
@@ -1403,8 +1415,6 @@ static void chtls_pass_accept_request(struct sock *sk,
kfree_skb(skb);
return;
-free_oreq:
- chtls_reqsk_free(oreq);
reject:
mk_tid_release(reply_skb, 0, tid);
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1599,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
+ if (!data) {
+ /* listening server close */
+ kfree_skb(skb);
+ goto unlock;
+ }
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@@ -1936,6 +1951,8 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
else if (tcp_sk(sk)->linger2 < 0 &&
!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
chtls_abort_conn(sk, skb);
+ else if (csk_flag_nochk(csk, CSK_TX_DATA_SENT))
+ chtls_set_quiesce_ctrl(sk, 0);
break;
default:
pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
@@ -1997,39 +2014,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
spin_unlock_bh(&cdev->deferq.lock);
}
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
- struct chtls_dev *cdev, int status, int queue)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct sk_buff *reply_skb;
- struct chtls_sock *csk;
-
- csk = rcu_dereference_sk_user_data(sk);
-
- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
- GFP_KERNEL);
-
- if (!reply_skb) {
- req->status = (queue << 1);
- t4_defer_reply(skb, cdev, send_defer_abort_rpl);
- return;
- }
-
- set_abort_rpl_wr(reply_skb, GET_TID(req), status);
- kfree_skb(skb);
-
- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
- if (csk_conn_inline(csk)) {
- struct l2t_entry *e = csk->l2t_entry;
-
- if (e && sk->sk_state != TCP_SYN_RECV) {
- cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
- return;
- }
- }
- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2078,9 +2062,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
+ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
- CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2094,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
+ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2113,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
- if (sk->sk_state == TCP_SYN_RECV)
- chtls_set_tcb_tflag(sk, 0, 0);
-
kfree_skb(skb);
return;
}
@@ -2158,12 +2139,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
- chtls_release_resources(sk);
- chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
rst_status, queue);
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2315,6 +2296,28 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static int chtls_set_tcb_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = cplhdr(skb) + RSS_HDR;
+ unsigned int hwtid = GET_TID(rpl);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+
+ /* return EINVAL if socket doesn't exist */
+ if (!sk)
+ return -EINVAL;
+
+ /* Reuse the skb, as the cpl_set_tcb_field structure
+ * is larger than cpl_abort_req
+ */
+ if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG)
+ chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL);
+
+ kfree_skb(skb);
+ return 0;
+}
+
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl,
[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
@@ -2327,5 +2330,6 @@ chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = chtls_conn_cpl,
[CPL_ABORT_REQ_RSS] = chtls_conn_cpl,
[CPL_ABORT_RPL_RSS] = chtls_conn_cpl,
- [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_SET_TCB_RPL] = chtls_set_tcb_rpl,
};
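chtls_handlers[] is indexed directly by CPL opcode, so wiring up the new CPL_SET_TCB_RPL reply path is a single designated initializer plus the handler itself. A sketch of the dispatch this enables (the receive-path function name is hypothetical):

    static int example_dispatch(struct chtls_dev *cdev, struct sk_buff *skb,
                                u8 opcode)
    {
            if (opcode < NUM_CPL_CMDS && chtls_handlers[opcode])
                    return chtls_handlers[opcode](cdev, skb);
            kfree_skb(skb);
            return -EOPNOTSUPP;
    }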
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index a4fb463af22a..1e67140b0f80 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -88,6 +88,24 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
return ret < 0 ? ret : 0;
}
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t)
+{
+ struct sk_buff *skb;
+ unsigned int wrlen;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return;
+
+ __set_tcb_field(sk, skb, word, mask, val, cookie, 0);
+ send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
+}
+
/*
* Set one of the t_flags bits in the TCB.
*/
@@ -113,6 +131,29 @@ static int chtls_set_tcb_quiesce(struct sock *sk, int val)
TF_RX_QUIESCE_V(val));
}
+void chtls_set_quiesce_ctrl(struct sock *sk, int val)
+{
+ struct chtls_sock *csk;
+ struct sk_buff *skb;
+ unsigned int wrlen;
+ int ret;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ __set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+ ret = cxgb4_ofld_send(csk->egress_dev, skb);
+ if (ret < 0)
+ kfree_skb(skb);
+}
+
/* TLS Key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index fb269d587b74..f04ec53544ae 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2509,8 +2509,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
@@ -2535,8 +2533,6 @@ static const struct net_device_ops enic_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d402d83d9edd..b6eba29d8e99 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5179,8 +5179,6 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = be_features_check,
.ndo_get_phys_port_id = be_get_phys_port_id,
};
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0981fe9652e5..3d9b0b161e24 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4360ce4d3fb6..d8e568f6caf3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2532,12 +2532,10 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
return XDP_PASS;
}
- xdp.data = vaddr + fd_off;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + qm_fd_get_length(fd);
- xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
- xdp.rxq = &dpaa_fq->xdp_rxq;
+ xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
+ &dpaa_fq->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
/* We reserve a fixed headroom of 256 bytes under the erratum and we
* offer it all to XDP programs to use. If no room is left for the
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index fb0bcd18ec0c..41e225baf571 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -350,7 +350,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u32 xdp_act = XDP_PASS;
- int err;
+ int err, offset;
rcu_read_lock();
@@ -358,14 +358,10 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
if (!xdp_prog)
goto out;
- xdp.data = vaddr + dpaa2_fd_get_offset(fd);
- xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.rxq = &ch->xdp_rxq;
-
- xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
- (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+ offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+ dpaa2_fd_get_len(fd), false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -1262,6 +1258,22 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
+static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
+ bool enable)
+{
+ int err;
+
+ err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
+
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_enable_vlan_filter failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1691,7 +1703,7 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
*/
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
goto out;
/* Check link state; speed / duplex changes are not treated yet */
@@ -1730,7 +1742,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- if (!priv->mac) {
+ if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
* make sure we don't race against the link up notification,
* which may come immediately after dpni_enable();
@@ -1752,7 +1764,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
phylink_start(priv->mac->phylink);
return 0;
@@ -1826,11 +1838,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- if (!priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_stop(priv->mac->phylink);
+ } else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
- } else {
- phylink_stop(priv->mac->phylink);
}
/* On dpni_disable(), the MC firmware will:
@@ -1952,6 +1964,43 @@ static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
}
}
+static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
+ vid, 0, 0, 0);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not add the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not remove the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -2058,6 +2107,13 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
bool enable;
int err;
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
+ if (err)
+ return err;
+ }
+
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
err = dpaa2_eth_set_rx_csum(priv, enable);
@@ -2115,7 +2171,7 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
return -EOPNOTSUPP;
@@ -2507,6 +2563,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
.ndo_setup_tc = dpaa2_eth_setup_tc,
+ .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -4015,6 +4073,9 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC;
net_dev->hw_features = net_dev->features;
+ if (priv->dpni_attrs.vlan_filter_entries)
+ net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
return 0;
}
@@ -4042,10 +4103,11 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
- if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
- return 0;
- if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
@@ -4056,23 +4118,38 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
mac->mc_io = priv->mc_io;
mac->net_dev = priv->net_dev;
- err = dpaa2_mac_connect(mac);
- if (err) {
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
- kfree(mac);
- return err;
- }
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
priv->mac = mac;
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ goto err_close_mac;
+ }
+ }
+
return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
}
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (!priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
+ dpaa2_mac_disconnect(priv->mac);
+
+ if (!dpaa2_eth_has_mac(priv))
return;
- dpaa2_mac_disconnect(priv->mac);
+ dpaa2_mac_close(priv->mac);
kfree(priv->mac);
priv->mac = NULL;
}
@@ -4101,7 +4178,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
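The NETIF_F_HW_VLAN_CTAG_FILTER handling added to dpaa2_eth_set_features() follows the usual changed-bits pattern: program the hardware only when the bit actually toggled. The skeleton, as a hedged sketch with the hardware call stubbed out:

    static int example_hw_set_vlan_filter(struct net_device *dev, bool enable)
    {
            return 0;       /* placeholder for dpni_enable_vlan_filter() */
    }

    static int example_set_features(struct net_device *dev,
                                    netdev_features_t features)
    {
            netdev_features_t changed = dev->features ^ features;

            if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
                    bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
                    int err = example_hw_set_vlan_filter(dev, enable);

                    if (err)
                            return err;
            }
            return 0;
    }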
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index d236b8695c39..c3d456c45102 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -693,6 +693,19 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
+static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+{
+ if (priv->mac && priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY)
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+{
+ return priv->mac ? true : false;
+}
+
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
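The two helpers encode a distinction used throughout the .c changes above: dpaa2_eth_is_type_phy() means phylink owns the link (start/stop and ethtool calls apply), while dpaa2_eth_has_mac() only says a DPMAC endpoint is connected, which is enough for the MAC statistics paths. A hypothetical caller:

    static void example_start_link(struct dpaa2_eth_priv *priv)
    {
            if (dpaa2_eth_is_type_phy(priv))
                    phylink_start(priv->mac->phylink);  /* PHY-managed link */
            else if (dpaa2_eth_has_mac(priv))
                    ;       /* fixed DPMAC link: MC firmware reports state */
    }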
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index f981a523e13a..bf59708b869e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,7 +85,7 @@ static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_nway_reset(priv->mac->phylink);
return -EOPNOTSUPP;
@@ -97,7 +97,7 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_ksettings_get(priv->mac->phylink,
link_settings);
@@ -115,7 +115,7 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (!priv->mac)
+ if (!dpaa2_eth_is_type_phy(priv))
return -ENOTSUPP;
return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
@@ -127,7 +127,7 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
- if (priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
return;
}
@@ -150,7 +150,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_set_pauseparam(priv->mac->phylink,
pause);
if (pause->autoneg)
@@ -198,7 +198,7 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_strings(p);
break;
}
@@ -211,7 +211,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
num_ss_stats += dpaa2_mac_get_sset_count();
return num_ss_stats;
default:
@@ -313,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
*(data + i++) = buf_cnt;
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 828c177df03d..69ad869446cf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -174,30 +174,22 @@ static void dpaa2_mac_link_up(struct phylink_config *config,
dpmac_state->up = 1;
- if (mac->if_link_type == DPMAC_LINK_TYPE_PHY) {
- /* If the DPMAC is configured for PHY mode, we need
- * to pass the link parameters to the MC firmware.
- */
- dpmac_state->rate = speed;
-
- if (duplex == DUPLEX_HALF)
- dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
- else if (duplex == DUPLEX_FULL)
- dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
-
- /* This is lossy; the firmware really should take the pause
- * enablement status rather than pause/asym pause status.
- */
- if (rx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
-
- if (rx_pause ^ tx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
- }
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
@@ -228,32 +220,6 @@ static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.mac_link_down = dpaa2_mac_link_down,
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io)
-{
- struct dpmac_attr attr;
- bool fixed = false;
- u16 mc_handle = 0;
- int err;
-
- err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
- &mc_handle);
- if (err || !mc_handle)
- return false;
-
- err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
- if (err)
- goto out;
-
- if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
- fixed = true;
-
-out:
- dpmac_close(mc_io, 0, mc_handle);
-
- return fixed;
-}
-
static int dpaa2_pcs_create(struct dpaa2_mac *mac,
struct device_node *dpmac_node, int id)
{
@@ -302,36 +268,20 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
- struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
struct device_node *dpmac_node;
struct phylink *phylink;
- struct dpmac_attr attr;
int err;
- err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
- &dpmac_dev->mc_handle);
- if (err || !dpmac_dev->mc_handle) {
- netdev_err(net_dev, "dpmac_open() = %d\n", err);
- return -ENODEV;
- }
-
- err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
- if (err) {
- netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
- goto err_close_dpmac;
- }
+ mac->if_link_type = mac->attr.link_type;
- mac->if_link_type = attr.link_type;
-
- dpmac_node = dpaa2_mac_get_node(attr.id);
+ dpmac_node = dpaa2_mac_get_node(mac->attr.id);
if (!dpmac_node) {
- netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
- err = -ENODEV;
- goto err_close_dpmac;
+ netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
+ return -ENODEV;
}
- err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
if (err < 0) {
err = -EINVAL;
goto err_put_node;
@@ -351,9 +301,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
- if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
- attr.eth_if != DPMAC_ETH_IF_RGMII) {
- err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if (mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ mac->attr.eth_if != DPMAC_ETH_IF_RGMII) {
+ err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
if (err)
goto err_put_node;
}
@@ -389,8 +339,7 @@ err_pcs_destroy:
dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
-err_close_dpmac:
- dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+
return err;
}
@@ -402,8 +351,40 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+}
+
+int dpaa2_mac_open(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ int err;
- dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle,
+ &mac->attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ return 0;
+
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_close(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
}
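
A minimal sketch of the split lifecycle this file now exports (hypothetical caller code, modeled on how a consumer such as dpaa2-eth would use it; error labels elided):

	err = dpaa2_mac_open(mac);	/* dpmac_open() + cache mac->attr */
	if (err)
		return err;

	err = dpaa2_mac_connect(mac);	/* phylink setup, reads mac->attr */
	if (err) {
		dpaa2_mac_close(mac);
		return err;
	}

	/* ... interface up and running ... */

	dpaa2_mac_disconnect(mac);
	dpaa2_mac_close(mac);		/* dpmac_close() */

Splitting open from connect lets the caller inspect the cached dpmac attributes (for example the link type) before deciding whether to set up phylink at all.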
static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 955a52856210..13d42dd58ec9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,7 @@ struct dpaa2_mac {
struct dpmac_link_state state;
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
+ struct dpmac_attr attr;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -28,6 +29,10 @@ struct dpaa2_mac {
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
struct fsl_mc_io *mc_io);
+int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+void dpaa2_mac_close(struct dpaa2_mac *mac);
+
int dpaa2_mac_connect(struct dpaa2_mac *mac);
void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 90453dc7baef..9f80bdfeedec 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -62,6 +62,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
@@ -662,4 +666,17 @@ struct dpni_rsp_single_step_cfg {
__le32 peer_delay;
};
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ u8 en;
+};
+
+struct dpni_cmd_vlan_id {
+ u8 flags;
+ u8 tc_id;
+ u8 flow_id;
+ u8 pad;
+ __le16 vlan_id;
+};
+
#endif /* _FSL_DPNI_CMD_H */
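
The structs above are overlaid on the 8-byte params area of struct fsl_mc_command, so multi-byte fields such as vlan_id must be stored little-endian (__le16). A rough sketch of how the one-bit 'en' flag is packed, assuming the dpni_set_field()/DPNI_MASK() helpers defined earlier in this header with an ENABLE field of shift 0 and size 1:

	/* dpni_set_field(cmd_params->en, ENABLE, en) is assumed to expand to: */
	cmd_params->en |= ((en) << DPNI_ENABLE_SHIFT) & DPNI_MASK(ENABLE);
	/* only the least significant bit of 'en' reaches the MC firmware */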
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 6ea7db66a632..aa429c17c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1225,6 +1225,99 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ * @flags:	0 - tc_id and flow_id are ignored; packets with this
+ *		vlan_id are passed on to the next classification stages.
+ *		DPNI_VLAN_SET_QUEUE_ACTION - packets with this vlan_id
+ *		are forwarded directly to the queue defined by tc_id
+ *		and flow_id.
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Selects the specific queue out of the set allocated for the
+ *	     same tc_id. Value must be in range 0 to NUM_QUEUES - 1
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->flags = flags;
+ cmd_params->tc_id = tc_id;
+ cmd_params->flow_id = flow_id;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
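These wrappers are the MC plumbing; a driver would typically expose them through the standard VLAN filtering ndo hooks. A hedged sketch of such glue (the dpaa2_eth_priv layout with mc_io/mc_token fields is an assumption):

	static int dpaa2_eth_vlan_rx_add_vid(struct net_device *net_dev,
					     __be16 proto, u16 vid)
	{
		struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

		/* flags = 0: no queue steering, just admit the VLAN */
		return dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
					vid, 0, 0, 0);
	}

	static int dpaa2_eth_vlan_rx_kill_vid(struct net_device *net_dev,
					      __be16 proto, u16 vid)
	{
		struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

		return dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
	}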
+/**
* dpni_add_mac_addr() - Add MAC address filter
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index e7b9e195b534..4e96d9362dd2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1114,4 +1114,13 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_single_step_cfg *ptp_cfg);
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u32 en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index ee0116ed4738..70e6d97b380f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -14,23 +14,6 @@
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
-{
- return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
-}
-
-static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
- u32 val)
-{
- enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
-}
-
-#define enetc_mdio_rd(mdio_priv, off) \
- _enetc_mdio_rd(mdio_priv, ENETC_##off)
-#define enetc_mdio_wr(mdio_priv, off, val) \
- _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
-
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
@@ -47,15 +30,29 @@ static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
-#define MDIO_DATA(x) ((x) & 0xffff)
-#define TIMEOUT 1000
+static inline u32 enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
+
+static bool enetc_mdio_is_busy(struct enetc_mdio_priv *mdio_priv)
+{
+ return enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_BSY;
+}
+
static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
- u32 val;
+ bool is_busy;
- return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
- !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
+ return readx_poll_timeout(enetc_mdio_is_busy, mdio_priv,
+ is_busy, !is_busy, 10, 10 * 1000);
}
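
readx_poll_timeout(op, args, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> repeatedly evaluates val = op(args) until cond holds or timeout_us elapses, and its accessor takes exactly one argument; that is why the bus-busy test is now a proper helper receiving mdio_priv, instead of the old enetc_mdio_rd_reg() macro that captured a local variable from the caller's scope. A generic sketch of the pattern (the foo_* names are hypothetical):

	#include <linux/iopoll.h>

	static u32 foo_read_status(struct foo_priv *priv)
	{
		return readl(priv->base + FOO_STATUS);
	}

	u32 status;
	/* poll every 10 us, give up after 10 ms */
	err = readx_poll_timeout(foo_read_status, priv, status,
				 !(status & FOO_BUSY), 10, 10 * 1000);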
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
@@ -75,7 +72,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -83,11 +80,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -95,7 +92,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
}
/* write the value */
- enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -121,7 +118,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -129,11 +126,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -141,21 +138,21 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
/* initiate the read */
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
return value;
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c527f4ee1d3a..0602d5d5d2ee 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -462,6 +462,11 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 04f24c66cf36..9ebdb0e54291 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_NO_HARD_RESET,
};
static const struct fec_devinfo fec_imx6q_info = {
@@ -953,7 +954,8 @@ fec_restart(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -2165,9 +2167,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->parent = &pdev->dev;
err = of_mdiobus_register(fep->mii_bus, node);
- of_node_put(node);
if (err)
goto err_out_free_mdiobus;
+ of_node_put(node);
mii_cnt++;
@@ -2180,6 +2182,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
err_out_free_mdiobus:
mdiobus_free(fep->mii_bus);
err_out:
+ of_node_put(node);
return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bb9887f98841..62f42921933d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -111,6 +111,7 @@ do { \
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
+#define IF_MODE_MII 0x00000001 /* 30-31 MII interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -442,6 +443,9 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
case PHY_INTERFACE_MODE_XGMII:
tmp |= IF_MODE_10G;
break;
+ case PHY_INTERFACE_MODE_MII:
+ tmp |= IF_MODE_MII;
+ break;
default:
tmp |= IF_MODE_GMII;
if (phy_if == PHY_INTERFACE_MODE_RGMII ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index c8e5d889bd81..21de56345503 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 8b51ee142fa3..152f4d83765a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d391a45cebb6..541de32ea662 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -58,7 +58,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba8869c3d891..ef4e2febeb5b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -70,9 +70,32 @@ static struct {
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
-static struct ucc_geth_info ugeth_primary_info = {
+static int ucc_geth_thread_count(enum ucc_geth_num_of_threads idx)
+{
+ static const u8 count[] = {
+ [UCC_GETH_NUM_OF_THREADS_1] = 1,
+ [UCC_GETH_NUM_OF_THREADS_2] = 2,
+ [UCC_GETH_NUM_OF_THREADS_4] = 4,
+ [UCC_GETH_NUM_OF_THREADS_6] = 6,
+ [UCC_GETH_NUM_OF_THREADS_8] = 8,
+ };
+ if (idx >= ARRAY_SIZE(count))
+ return 0;
+ return count[idx];
+}
+
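The table replaces two long switch statements (removed further down); any enum value outside the table maps to 0, which callers treat as invalid. For example:

	n = ucc_geth_thread_count(UCC_GETH_NUM_OF_THREADS_4);		/* n == 4 */
	n = ucc_geth_thread_count((enum ucc_geth_num_of_threads)0xff);	/* n == 0 */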
+static inline int ucc_geth_tx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static inline int ucc_geth_rx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static const struct ucc_geth_info ugeth_primary_info = {
.uf_info = {
- .bd_mem_part = MEM_PART_SYSTEM,
.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
.max_rx_buf_length = 1536,
/* adjusted at startup if max-speed 1000 */
@@ -90,8 +113,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.tcrc = UCC_FAST_16_BIT_CRC,
.synl = UCC_FAST_SYNC_LEN_NOT_USED,
},
- .numQueuesTx = 1,
- .numQueuesRx = 1,
.extendedFilteringChainPointer = ((uint32_t) NULL),
.typeorlen = 3072 /*1536 */ ,
.nonBackToBackIfgPart1 = 0x40,
@@ -157,8 +178,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
-static struct ucc_geth_info ugeth_info[8];
-
#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
@@ -558,7 +577,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
int i;
int length;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
if (ugeth->p_tx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenTx[i] *
@@ -567,7 +586,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
mem_disp(ugeth->p_tx_bd_ring[i], length);
}
}
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenRx[i] *
@@ -671,32 +690,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
in_be32(&ugeth->ug_regs->scam));
if (ugeth->p_thread_data_tx) {
- int numThreadsTxNumerical;
- switch (ugeth->ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
- numThreadsTxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsTx);
pr_info("Thread data TXs:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_tx);
- for (i = 0; i < numThreadsTxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data TX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_tx[i]);
@@ -705,32 +704,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
}
}
if (ugeth->p_thread_data_rx) {
- int numThreadsRxNumerical;
- switch (ugeth->ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
- numThreadsRxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsRx);
pr_info("Thread data RX:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_rx);
- for (i = 0; i < numThreadsRxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data RX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_rx[i]);
@@ -905,7 +884,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_send_q_mem_reg) {
pr_info("Send Q memory registers:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
pr_info("SQQD[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
@@ -937,7 +916,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
pr_info("RX IRQ coalescing tables:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_rx_irq_coalescing_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX IRQ coalescing table entry[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_irq_coalescing_tbl->
@@ -959,7 +938,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_rx_bd_qs_tbl) {
pr_info("RX BD QS tables:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX BD QS table[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i]);
@@ -1835,7 +1814,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
/* Return existing data buffers in ring */
bd = ugeth->p_rx_bd_ring[i];
@@ -1856,12 +1835,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
kfree(ugeth->rx_skbuff[i]);
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->rx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ kfree(ugeth->p_rx_bd_ring[i]);
ugeth->p_rx_bd_ring[i] = NULL;
}
}
@@ -1880,7 +1854,7 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
bd = ugeth->p_tx_bd_ring[i];
if (!bd)
continue;
@@ -1898,15 +1872,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
kfree(ugeth->tx_skbuff[i]);
- if (ugeth->p_tx_bd_ring[i]) {
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->tx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->tx_bd_ring_offset[i]);
- ugeth->p_tx_bd_ring[i] = NULL;
- }
+ kfree(ugeth->p_tx_bd_ring[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
}
}
@@ -1921,50 +1888,39 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
ugeth->uccf = NULL;
}
- if (ugeth->p_thread_data_tx) {
- qe_muram_free(ugeth->thread_dat_tx_offset);
- ugeth->p_thread_data_tx = NULL;
- }
- if (ugeth->p_thread_data_rx) {
- qe_muram_free(ugeth->thread_dat_rx_offset);
- ugeth->p_thread_data_rx = NULL;
- }
- if (ugeth->p_exf_glbl_param) {
- qe_muram_free(ugeth->exf_glbl_param_offset);
- ugeth->p_exf_glbl_param = NULL;
- }
- if (ugeth->p_rx_glbl_pram) {
- qe_muram_free(ugeth->rx_glbl_pram_offset);
- ugeth->p_rx_glbl_pram = NULL;
- }
- if (ugeth->p_tx_glbl_pram) {
- qe_muram_free(ugeth->tx_glbl_pram_offset);
- ugeth->p_tx_glbl_pram = NULL;
- }
- if (ugeth->p_send_q_mem_reg) {
- qe_muram_free(ugeth->send_q_mem_reg_offset);
- ugeth->p_send_q_mem_reg = NULL;
- }
- if (ugeth->p_scheduler) {
- qe_muram_free(ugeth->scheduler_offset);
- ugeth->p_scheduler = NULL;
- }
- if (ugeth->p_tx_fw_statistics_pram) {
- qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
- ugeth->p_tx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_fw_statistics_pram) {
- qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
- ugeth->p_rx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_irq_coalescing_tbl) {
- qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
- ugeth->p_rx_irq_coalescing_tbl = NULL;
- }
- if (ugeth->p_rx_bd_qs_tbl) {
- qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
- ugeth->p_rx_bd_qs_tbl = NULL;
- }
+ qe_muram_free_addr(ugeth->p_thread_data_tx);
+ ugeth->p_thread_data_tx = NULL;
+
+ qe_muram_free_addr(ugeth->p_thread_data_rx);
+ ugeth->p_thread_data_rx = NULL;
+
+ qe_muram_free_addr(ugeth->p_exf_glbl_param);
+ ugeth->p_exf_glbl_param = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_glbl_pram);
+ ugeth->p_rx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_glbl_pram);
+ ugeth->p_tx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_send_q_mem_reg);
+ ugeth->p_send_q_mem_reg = NULL;
+
+ qe_muram_free_addr(ugeth->p_scheduler);
+ ugeth->p_scheduler = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_fw_statistics_pram);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_fw_statistics_pram);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_irq_coalescing_tbl);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_bd_qs_tbl);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+
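This block relies on a qe_muram_free_addr() helper in the QE core that frees an allocation by its mapped virtual address instead of by a separately tracked offset, and that tolerates NULL, which is what allows every if() guard above to go away. A sketch of its assumed shape:

	/* assumed helper, mirroring the QE/CPM muram API */
	void qe_muram_free_addr(void __iomem *addr)
	{
		if (!addr)
			return;
		qe_muram_free(qe_muram_offset(addr));
	}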
if (ugeth->p_init_enet_param_shadow) {
return_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -2073,15 +2029,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
- (uf_info->bd_mem_part == MEM_PART_MURAM))) {
- if (netif_msg_probe(ugeth))
- pr_err("Bad memory partition value\n");
- return -EINVAL;
- }
-
/* Rx BD lengths */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
(ug_info->bdRingLenRx[i] %
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
@@ -2092,7 +2041,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* Tx BD lengths */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
if (netif_msg_probe(ugeth))
pr_err("Tx BD ring length must be no smaller than 2\n");
@@ -2109,14 +2058,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* num Tx queues */
- if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ if (ucc_geth_tx_queues(ug_info) > NUM_TX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of tx queues too large\n");
return -EINVAL;
}
/* num Rx queues */
- if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ if (ucc_geth_rx_queues(ug_info) > NUM_RX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of rx queues too large\n");
return -EINVAL;
@@ -2124,7 +2073,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l2qt */
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
- if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l2qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2133,7 +2082,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l3qt */
for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
- if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l3qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("IP priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2156,10 +2105,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Generate uccm_mask for receive */
uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
@@ -2198,53 +2147,32 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
- /* Allocate in multiple of
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
- according to spec */
- length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
- / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_TX_BD_RING_ALIGNMENT;
- ugeth->tx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
-
- if (ugeth->tx_bd_ring_offset[j] != 0)
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->tx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_TX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- tx_bd_ring_offset[j]);
- }
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
+ u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT,
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT);
+ u32 alloc;
+
+ length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd);
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
+
if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Tx bd rings\n");
return -ENOMEM;
}
/* Zero unused end of bd ring, according to spec */
- memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
- ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
- length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+ memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length);
}
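
Rounding the allocation up to a power of two is what makes a plain kmalloc() safe here: the slab allocators return naturally aligned buffers for power-of-two sizes, so alloc >= align implies the ring meets the hardware alignment with no manual pointer fix-up. A worked example with illustrative numbers (assuming an 8-byte struct qe_bd and a 32-byte alignment requirement):

	length = 100 * sizeof(struct qe_bd);	/* 800 bytes */
	alloc  = round_up(length, align);	/* 800, already 32-aligned */
	alloc  = roundup_pow_of_two(alloc);	/* 1024 */
	/* kmalloc(1024) is 1024-byte aligned, hence also 32-byte aligned */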
/* Init Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->tx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenTx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenTx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2252,9 +2180,6 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
- ugeth->tx_skbuff[j][i] = NULL;
-
ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
@@ -2284,27 +2209,15 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
+ u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ u32 alloc;
+
length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_RX_BD_RING_ALIGNMENT;
- ugeth->rx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
- if (ugeth->rx_bd_ring_offset[j] != 0)
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->rx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_RX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- rx_bd_ring_offset[j]);
- }
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
if (!ugeth->p_rx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Rx bd rings\n");
@@ -2313,11 +2226,11 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
}
/* Init Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->rx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenRx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenRx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->rx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2325,9 +2238,6 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
- ugeth->rx_skbuff[j][i] = NULL;
-
ugeth->skb_currx[j] = 0;
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
@@ -2359,10 +2269,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
u32 init_enet_pram_offset, cecr_subblock, command;
u32 ifstat, i, j, size, l2qt, l3qt;
u16 temoder = UCC_GETH_TEMODER_INIT;
- u16 test;
u8 function_code = 0;
u8 __iomem *endOfRing;
u8 numThreadsRxNumerical, numThreadsTxNumerical;
+ s32 rx_glbl_pram_offset, tx_glbl_pram_offset;
ugeth_vdbg("%s: IN", __func__);
uccf = ugeth->uccf;
@@ -2371,45 +2281,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
uf_regs = uccf->uf_regs;
ug_regs = ugeth->ug_regs;
- switch (ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
+ numThreadsRxNumerical = ucc_geth_thread_count(ug_info->numThreadsRx);
+ if (!numThreadsRxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Rx threads value\n");
return -EINVAL;
}
- switch (ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
+ numThreadsTxNumerical = ucc_geth_thread_count(ug_info->numThreadsTx);
+ if (!numThreadsTxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Tx threads value\n");
return -EINVAL;
@@ -2507,20 +2387,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
*/
/* Tx global PRAM */
/* Allocate global tx parameter RAM page */
- ugeth->tx_glbl_pram_offset =
+ tx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
+ if (tx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_tx_glbl_pram =
- (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
- tx_glbl_pram_offset);
- /* Zero out p_tx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
-
+ ugeth->p_tx_glbl_pram = qe_muram_addr(tx_glbl_pram_offset);
/* Fill global PRAM */
/* TQPTR */
@@ -2554,7 +2429,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* SQPTR */
/* Size varies with number of Tx queues */
ugeth->send_q_mem_reg_offset =
- qe_muram_alloc(ug_info->numQueuesTx *
+ qe_muram_alloc(ucc_geth_tx_queues(ug_info) *
sizeof(struct ucc_geth_send_queue_qd),
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
@@ -2570,29 +2445,20 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
endOfRing =
ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
1) * sizeof(struct qe_bd);
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32) virt_to_phys(endOfRing));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32)qe_muram_dma(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32)qe_muram_dma(endOfRing));
- }
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
}
/* schedulerbasepointer */
- if (ug_info->numQueuesTx > 1) {
+ if (ucc_geth_tx_queues(ug_info) > 1) {
/* scheduler exists only if more than 1 tx queue */
ugeth->scheduler_offset =
qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
@@ -2608,8 +2474,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
scheduler_offset);
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
ugeth->scheduler_offset);
- /* Zero out p_scheduler */
- memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
/* Set values in scheduler */
out_be32(&ugeth->p_scheduler->mblinterval,
@@ -2652,23 +2516,18 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_tx_fw_statistics_pram =
(struct ucc_geth_tx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
- /* Zero out p_tx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
- 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
}
/* temoder */
/* Already has speed set */
- if (ug_info->numQueuesTx > 1)
+ if (ucc_geth_tx_queues(ug_info) > 1)
temoder |= TEMODER_SCHEDULER_ENABLE;
if (ug_info->ipCheckSumGenerate)
temoder |= TEMODER_IP_CHECKSUM_GENERATE;
- temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ temoder |= ((ucc_geth_tx_queues(ug_info) - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
- test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
-
/* Function code register value to be used later */
function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
/* Required for QE */
@@ -2678,20 +2537,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Rx global PRAM */
/* Allocate global rx parameter RAM page */
- ugeth->rx_glbl_pram_offset =
+ rx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
+ if (rx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_rx_glbl_pram =
- (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
- rx_glbl_pram_offset);
- /* Zero out p_rx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
-
+ ugeth->p_rx_glbl_pram = qe_muram_addr(rx_glbl_pram_offset);
/* Fill global PRAM */
/* RQPTR */
@@ -2729,16 +2583,13 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_rx_fw_statistics_pram =
(struct ucc_geth_rx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
- /* Zero out p_rx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
- sizeof(struct ucc_geth_rx_firmware_statistics_pram));
}
/* intCoalescingPtr */
/* Size varies with number of Rx queues */
ugeth->rx_irq_coalescing_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
@@ -2754,7 +2605,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->rx_irq_coalescing_tbl_offset);
/* Fill interrupt coalescing table */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
interruptcoalescingmaxvalue,
ug_info->interruptcoalescingmaxvalue[i]);
@@ -2803,7 +2654,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* RBDQPTR */
/* Size varies with number of Rx queues */
ugeth->rx_bd_qs_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
(sizeof(struct ucc_geth_rx_bd_queues_entry) +
sizeof(struct ucc_geth_rx_prefetched_bds)),
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
@@ -2817,23 +2668,12 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
(struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
rx_bd_qs_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
- /* Zero out p_rx_bd_qs_tbl */
- memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
- 0,
- ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
- sizeof(struct ucc_geth_rx_prefetched_bds)));
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32)qe_muram_dma(ugeth->p_rx_bd_ring[i]));
- }
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
/* rest of fields handled by QE */
}
@@ -2854,7 +2694,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ug_info->
vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
- remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ remoder |= ((ucc_geth_rx_queues(ug_info) - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
if (ug_info->ipCheckSumCheck)
remoder |= REMODER_IP_CHECKSUM_CHECK;
if (ug_info->ipAddressAlignment)
@@ -2937,14 +2777,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
* allocated resources can be released when the channel is freed.
*/
if (!(ugeth->p_init_enet_param_shadow =
- kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
+ kzalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
return -ENOMEM;
}
- /* Zero out *p_init_enet_param_shadow */
- memset((char *)ugeth->p_init_enet_param_shadow,
- 0, sizeof(struct ucc_geth_init_pram));
/* Fill shadow InitEnet command parameter structure */
@@ -2964,7 +2801,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
- ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ rx_glbl_pram_offset | ug_info->riscRx;
if ((ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
(ug_info->largestexternallookupkeysize !=
@@ -3002,7 +2839,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_init_enet_param_shadow->txglobal =
- ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ tx_glbl_pram_offset | ug_info->riscTx;
if ((ret_val =
fill_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -3016,7 +2853,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
/* Load Rx bds with buffers */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not fill Rx bds with buffers\n");
@@ -3287,12 +3124,12 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
/* Tx event processing */
spin_lock(&ugeth->lock);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
ucc_geth_tx(ugeth->ndev, i);
spin_unlock(&ugeth->lock);
howmany = 0;
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
@@ -3685,6 +3522,36 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
+static int ucc_geth_parse_clock(struct device_node *np, const char *which,
+ enum qe_clock *out)
+{
+ const char *sprop;
+ char buf[24];
+
+ snprintf(buf, sizeof(buf), "%s-clock-name", which);
+ sprop = of_get_property(np, buf, NULL);
+ if (sprop) {
+ *out = qe_clock_source(sprop);
+ } else {
+ u32 val;
+
+ snprintf(buf, sizeof(buf), "%s-clock", which);
+ if (of_property_read_u32(np, buf, &val)) {
+ /* If both *-clock-name and *-clock are missing,
+ * we want to tell people to use *-clock-name.
+ */
+			pr_err("missing %s-clock-name property\n", which);
+ return -EINVAL;
+ }
+ *out = val;
+ }
+ if (*out < QE_CLK_NONE || *out > QE_CLK24) {
+ pr_err("invalid %s property\n", buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
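A hypothetical use of the helper, assuming a device-tree node carrying rx-clock-name = "brg5" and that qe_clock_source() maps that string to QE_BRG5:

	enum qe_clock clk;
	int err;

	err = ucc_geth_parse_clock(np, "rx", &clk);
	/* err == 0, clk == QE_BRG5; with neither property present,
	 * err == -EINVAL and the user is told to add rx-clock-name */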
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3695,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct resource res;
int err, ucc_num, max_speed = 0;
const unsigned int *prop;
- const char *sprop;
const void *mac_addr;
phy_interface_t phy_interface;
static const int enet_to_speed[] = {
@@ -3725,62 +3591,23 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = &ugeth_info[ucc_num];
- if (ug_info == NULL) {
- if (netif_msg_probe(&debug))
- pr_err("[%d] Missing additional data!\n", ucc_num);
- return -ENODEV;
- }
+ ug_info = kmalloc(sizeof(*ug_info), GFP_KERNEL);
+ if (ug_info == NULL)
+ return -ENOMEM;
+ memcpy(ug_info, &ugeth_primary_info, sizeof(*ug_info));
ug_info->uf_info.ucc_num = ucc_num;
- sprop = of_get_property(np, "rx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.rx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.rx_clock > QE_CLK24)) {
- pr_err("invalid rx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "rx-clock", NULL);
- if (!prop) {
- /* If both rx-clock-name and rx-clock are missing,
- we want to tell people to use rx-clock-name. */
- pr_err("missing rx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid rx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.rx_clock = *prop;
- }
-
- sprop = of_get_property(np, "tx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.tx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.tx_clock > QE_CLK24)) {
- pr_err("invalid tx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "tx-clock", NULL);
- if (!prop) {
- pr_err("missing tx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid tx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.tx_clock = *prop;
- }
+ err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
+ if (err)
+ goto err_free_info;
+ err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
+ if (err)
+ goto err_free_info;
err = of_address_to_resource(np, 0, &res);
if (err)
- return -EINVAL;
+ goto err_free_info;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
@@ -3793,7 +3620,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
*/
err = of_phy_register_fixed_link(np);
if (err)
- return err;
+ goto err_free_info;
ug_info->phy_node = of_node_get(np);
}
@@ -3889,6 +3716,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3923,6 +3751,8 @@ err_deregister_fixed_link:
of_phy_deregister_fixed_link(np);
of_node_put(ug_info->tbi_node);
of_node_put(ug_info->phy_node);
+err_free_info:
+ kfree(ug_info);
return err;
}
@@ -3934,12 +3764,13 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ kfree(ugeth->ug_info);
+ free_netdev(dev);
return 0;
}
@@ -3967,17 +3798,10 @@ static struct platform_driver ucc_geth_driver = {
static int __init ucc_geth_init(void)
{
- int i, ret;
-
if (netif_msg_drv(&debug))
pr_info(DRV_DESC "\n");
- for (i = 0; i < 8; i++)
- memcpy(&(ugeth_info[i]), &ugeth_primary_info,
- sizeof(ugeth_primary_info));
-
- ret = platform_driver_register(&ucc_geth_driver);
- return ret;
+ return platform_driver_register(&ucc_geth_driver);
}
static void __exit ucc_geth_exit(void)
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 1a9bdf66a7d8..4294ed096ebb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
- u8 res2[0x80 - 0x74];
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
} __packed;
/* structure representing Extended Filtering Global Parameters in PRAM */
@@ -1069,8 +1076,6 @@ struct ucc_geth_tad_params {
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
- u8 numQueuesTx;
- u8 numQueuesRx;
int ipCheckSumCheck;
int ipCheckSumGenerate;
int rxExtendedFiltering;
@@ -1158,9 +1163,7 @@ struct ucc_geth_private {
struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
u32 exf_glbl_param_offset;
struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
- u32 rx_glbl_pram_offset;
struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
- u32 tx_glbl_pram_offset;
struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
u32 send_q_mem_reg_offset;
struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
@@ -1178,9 +1181,7 @@ struct ucc_geth_private {
struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
u32 rx_bd_qs_tbl_offset;
u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
- u32 tx_bd_ring_offset[NUM_TX_QUEUES];
u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
- u32 rx_bd_ring_offset[NUM_RX_QUEUES];
u8 __iomem *confBd[NUM_TX_QUEUES];
u8 __iomem *txBd[NUM_TX_QUEUES];
u8 __iomem *rxBd[NUM_RX_QUEUES];
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 858cb293152a..5d7824d2b4d4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1502,7 +1502,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- assert(skb->queue_mapping < ndev->ae_handle->q_num);
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
return hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7165da0ee9aa..a6e3f07caf99 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for mutl buffer*/
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index fb5e8842983c..33defa4c180a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
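
The fix matters because a ring pointer must wrap at the number of ring entries, not at the size of one message; with the values assumed here for illustration (HCLGE_MBX_MAX_ARQ_MSG_SIZE being the per-message word count, e.g. 8, and HCLGE_MBX_MAX_ARQ_MSG_NUM the entry count, e.g. 1024), the old macros would cycle through only the first 8 slots of the ARQ:

	/* correct: wrap at the entry count */
	arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM;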
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 405e49033417..512080640cbc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1070,7 +1070,7 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
* HW checksum of the non-IP packets and GSO packets is handled at
* different place in the following code
*/
- if (skb->csum_not_inet || skb_is_gso(skb) ||
+ if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
!test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
return false;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e6f37f91c489..c242883fea5d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
- if (hdev->hw.mac.phydev) {
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
@@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_udp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 50a294dfaff5..ca46bc9110d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -107,6 +107,8 @@
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 145757cb70f9..674b3a22e91f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 1b183bc35604..f6d817a3edcb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -122,6 +122,8 @@
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f302504faa8a..a40af510018a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -955,6 +955,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
@@ -1383,10 +1384,10 @@ static int ibmvnic_close(struct net_device *netdev)
/**
* build_hdr_data - creates L2/L3/L4 header data buffer
- * @hdr_field - bitfield determining needed headers
- * @skb - socket buffer
- * @hdr_len - array of header lengths
- * @tot_len - total length of data
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array of header lengths
+ * @hdr_data: buffer to write the header to
*
* Reads hdr_field to determine which headers are needed by firmware.
* Builds a buffer containing these headers. Saves individual header
@@ -1443,11 +1444,11 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
/**
* create_hdr_descs - create header and header extension descriptors
- * @hdr_field - bitfield determining needed headers
- * @data - buffer containing header data
- * @len - length of data buffer
- * @hdr_len - array of individual header lengths
- * @scrq_arr - descriptor array
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
@@ -1495,10 +1496,9 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @skb - socket buffer
- * @num_entries - number of descriptors to be sent
- * @subcrq - first TX descriptor
- * @hdr_field - bit field determining which headers will be sent
+ * @txbuff: tx buffer
+ * @num_entries: number of descriptors to be sent
+ * @hdr_field: bit field determining which headers will be sent
*
* This function will build a TX descriptor array with applicable
* L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
@@ -1924,93 +1924,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return rc;
}
-/**
- * do_change_param_reset returns zero if we are able to keep processing reset
- * events, or non-zero if we hit a fatal error and must halt.
- */
-static int do_change_param_reset(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rwi *rwi,
- u32 reset_state)
-{
- struct net_device *netdev = adapter->netdev;
- int i, rc;
-
- netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
- rwi->reset_reason);
-
- netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
-
- ibmvnic_cleanup(netdev);
-
- if (reset_state == VNIC_OPEN) {
- rc = __ibmvnic_close(netdev);
- if (rc)
- goto out;
- }
-
- release_resources(adapter);
- release_sub_crqs(adapter, 1);
- release_crq_queue(adapter);
-
- adapter->state = VNIC_PROBED;
-
- rc = init_crq_queue(adapter);
-
- if (rc) {
- netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
- goto out;
- }
-
- /* If the adapter was in PROBE state prior to the reset,
- * exit here.
- */
- if (reset_state == VNIC_PROBED)
- goto out;
-
- rc = ibmvnic_login(netdev);
- if (rc) {
- goto out;
- }
-
- rc = init_resources(adapter);
- if (rc)
- goto out;
-
- ibmvnic_disable_irqs(adapter);
-
- adapter->state = VNIC_CLOSED;
-
- if (reset_state == VNIC_CLOSED)
- return 0;
-
- rc = __ibmvnic_open(netdev);
- if (rc) {
- rc = IBMVNIC_OPEN_FAILED;
- goto out;
- }
-
- /* refresh device's multicast list */
- ibmvnic_set_multi(netdev);
-
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
-out:
- if (rc)
- adapter->state = reset_state;
- return rc;
-}
-
-/**
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2027,7 +1941,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->state, adapter->failover_pending,
rwi->reset_reason, reset_state);
- rtnl_lock();
+ adapter->reset_reason = rwi->reset_reason;
+ /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_lock();
+
/*
* Now that we have the rtnl lock, clear any pending failover.
* This will ensure ibmvnic_open() has either completed or will
@@ -2037,7 +1955,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->failover_pending = false;
netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
@@ -2049,25 +1966,37 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (reset_state == VNIC_OPEN &&
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_FAILOVER) {
- adapter->state = VNIC_CLOSING;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ goto out;
+ } else {
+ adapter->state = VNIC_CLOSING;
- /* Release the RTNL lock before link state change and
- * re-acquire after the link state change to allow
- * linkwatch_event to grab the RTNL lock and run during
- * a reset.
- */
- rtnl_unlock();
- rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- rtnl_lock();
- if (rc)
- goto out;
+ /* Release the RTNL lock before link state change and
+ * re-acquire after the link state change to allow
+ * linkwatch_event to grab the RTNL lock and run during
+ * a reset.
+ */
+ rtnl_unlock();
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ rtnl_lock();
+ if (rc)
+ goto out;
- if (adapter->state != VNIC_CLOSING) {
- rc = -1;
- goto out;
+ if (adapter->state != VNIC_CLOSING) {
+ rc = -1;
+ goto out;
+ }
+
+ adapter->state = VNIC_CLOSED;
}
+ }
- adapter->state = VNIC_CLOSED;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ release_resources(adapter);
+ release_sub_crqs(adapter, 1);
+ release_crq_queue(adapter);
}
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -2076,7 +2005,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
release_sub_crqs(adapter, 1);
} else {
@@ -2115,7 +2046,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- if (adapter->req_rx_queues != old_num_rx_queues ||
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_resources(adapter);
+ if (rc)
+ goto out;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
@@ -2180,7 +2115,9 @@ out:
/* restore the adapter state if reset failed */
if (rc)
adapter->state = reset_state;
- rtnl_unlock();
+ /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_unlock();
netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
adapter->state, adapter->failover_pending, rc);
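
Aside: the rtnl handling above reduces to a simple convention, sketched below with placeholder names (the real code keys off adapter->reset_reason as shown): whoever schedules a CHANGE_PARAM reset already holds rtnl, so the reset worker takes and releases the lock only for every other reset reason.

static void do_reset_locking_sketch(bool caller_holds_rtnl)
{
	if (!caller_holds_rtnl)
		rtnl_lock();

	/* ... reset steps that must run under rtnl ... */

	if (!caller_holds_rtnl)
		rtnl_unlock();		/* never drop the caller's lock */
}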
@@ -2311,10 +2248,7 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
- /* CHANGE_PARAM requestor holds rtnl_lock */
- rc = do_change_param_reset(adapter, rwi, reset_state);
- } else if (adapter->force_reset_recovery) {
+ if (adapter->force_reset_recovery) {
/*
* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
@@ -2341,8 +2275,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -2981,9 +2914,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
- netdev_dbg(adapter->netdev,
- "Invalid scrq reset. irq (%d) or msgs (%p).\n",
- scrq->irq, scrq->msgs);
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@@ -3873,7 +3804,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
+
client_data_len = vnic_client_data_len(adapter);
buffer_size =
@@ -5084,6 +5017,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
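
Aside: a hedged sketch of the ordering rule this dma_rmb() enforces. The CPU is free to satisfy the payload loads before the validity-flag load unless a read barrier separates them, and dma_rmb() is the barrier that orders CPU reads against coherent DMA writes from the device. Names below (ring_entry, entry_valid(), handle()) are placeholders, not ibmvnic symbols:

static void consume_one_entry(struct ring_entry *e)
{
	if (!entry_valid(e))	/* e.g. a "response" bit in e->first */
		return;		/* device has not filled this slot yet */

	/* Without this barrier the payload loads could complete before
	 * the flag load above, yielding stale descriptor contents. */
	dma_rmb();

	handle(e);		/* now safe to parse the DMA'd fields */
}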
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 8cc651d37a7f..f8d78af76d7d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1739,10 +1739,10 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
dma_addr_t dma_addr;
cb->command = nic->tx_command;
- dma_addr = pci_map_single(nic->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
dev_kfree_skb_any(skb);
skb = NULL;
return -ENOMEM;
@@ -1828,10 +1828,10 @@ static int e100_tx_clean(struct nic *nic)
dev->stats.tx_packets++;
dev->stats.tx_bytes += cb->skb->len;
- pci_unmap_single(nic->pdev,
- le32_to_cpu(cb->u.tcb.tbd.buf_addr),
- le16_to_cpu(cb->u.tcb.tbd.size),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&nic->pdev->dev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(cb->skb);
cb->skb = NULL;
tx_cleaned = 1;
@@ -1855,10 +1855,10 @@ static void e100_clean_cbs(struct nic *nic)
while (nic->cbs_avail != nic->params.cbs.count) {
struct cb *cb = nic->cb_to_clean;
if (cb->skb) {
- pci_unmap_single(nic->pdev,
- le32_to_cpu(cb->u.tcb.tbd.buf_addr),
- le16_to_cpu(cb->u.tcb.tbd.size),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&nic->pdev->dev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ DMA_TO_DEVICE);
dev_kfree_skb(cb->skb);
}
nic->cb_to_clean = nic->cb_to_clean->next;
@@ -1925,10 +1925,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
/* Init, and map the RFD. */
skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
- rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data,
+ RFD_BUF_LEN, DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
+ if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
@@ -1941,8 +1941,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
if (rx->prev->skb) {
struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
- pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ rx->prev->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
}
return 0;
@@ -1961,8 +1963,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
return -EAGAIN;
/* Need to sync before taking a peek at cb_complete bit */
- pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd), DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
@@ -1981,9 +1983,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
if (ioread8(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
- pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
- sizeof(struct rfd),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_FROM_DEVICE);
return -ENODATA;
}
@@ -1995,8 +1997,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
actual_size = RFD_BUF_LEN - sizeof(struct rfd);
/* Get data */
- pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN,
+ DMA_BIDIRECTIONAL);
/* If this buffer has the el bit, but we think the receiver
* is still running, check to see if it really stopped while
@@ -2097,22 +2099,25 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
(struct rfd *)new_before_last_rx->skb->data;
new_before_last_rfd->size = 0;
new_before_last_rfd->command |= cpu_to_le16(cb_el);
- pci_dma_sync_single_for_device(nic->pdev,
- new_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ new_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
/* Now that we have a new stopping point, we can clear the old
* stopping point. We must sync twice to get the proper
* ordering on the hardware side of things. */
old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
- pci_dma_sync_single_for_device(nic->pdev,
- old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ old_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
+ ETH_FCS_LEN);
- pci_dma_sync_single_for_device(nic->pdev,
- old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ old_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
}
if (restart_required) {
@@ -2134,8 +2139,9 @@ static void e100_rx_clean_list(struct nic *nic)
if (nic->rxs) {
for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
if (rx->skb) {
- pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&nic->pdev->dev,
+ rx->dma_addr, RFD_BUF_LEN,
+ DMA_BIDIRECTIONAL);
dev_kfree_skb(rx->skb);
}
}
@@ -2177,8 +2183,8 @@ static int e100_rx_alloc_list(struct nic *nic)
before_last = (struct rfd *)rx->skb->data;
before_last->command |= cpu_to_le16(cb_el);
before_last->size = 0;
- pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd), DMA_BIDIRECTIONAL);
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
nic->ru_running = RU_SUSPENDED;
@@ -2377,8 +2383,8 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
msleep(10);
- pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr,
+ RFD_BUF_LEN, DMA_BIDIRECTIONAL);
if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
skb->data, ETH_DATA_LEN))
@@ -2751,16 +2757,16 @@ static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int e100_alloc(struct nic *nic)
{
- nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
- &nic->dma_addr);
+ nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem),
+ &nic->dma_addr, GFP_KERNEL);
return nic->mem ? 0 : -ENOMEM;
}
static void e100_free(struct nic *nic)
{
if (nic->mem) {
- pci_free_consistent(nic->pdev, sizeof(struct mem),
- nic->mem, nic->dma_addr);
+ dma_free_coherent(&nic->pdev->dev, sizeof(struct mem),
+ nic->mem, nic->dma_addr);
nic->mem = NULL;
}
}
@@ -2853,7 +2859,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
goto err_out_free_res;
}
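
Aside: the e100 conversion above is mechanical; every legacy PCI DMA wrapper maps one-to-one onto the generic DMA API it thinly wrapped:

  pci_map_single(pdev, ...)               ->  dma_map_single(&pdev->dev, ...)
  pci_unmap_single(pdev, ...)             ->  dma_unmap_single(&pdev->dev, ...)
  pci_dma_sync_single_for_cpu/_for_device ->  dma_sync_single_for_cpu/_for_device
  pci_dma_mapping_error(pdev, addr)       ->  dma_mapping_error(&pdev->dev, addr)
  pci_alloc_consistent(pdev, sz, &h)      ->  dma_alloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL)
  pci_free_consistent(pdev, sz, p, h)     ->  dma_free_coherent(&pdev->dev, sz, p, h)
  pci_set_dma_mask(pdev, mask)            ->  dma_set_mask(&pdev->dev, mask)
  PCI_DMA_TODEVICE / _FROMDEVICE / _BIDIRECTIONAL  ->  DMA_TO_DEVICE / DMA_FROM_DEVICE / DMA_BIDIRECTIONAL

The one semantic change is the explicit GFP_KERNEL in e100_alloc(): the legacy pci_alloc_consistent() wrapper historically passed GFP_ATOMIC, which is needlessly restrictive in a probe-time path that may sleep.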
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba7a0f8f6937..5b2143f4b1f8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 03215b0aee4b..06442e6bef73 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+ "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, e1000e_priv_flags_strings,
+ E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+ return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.type < e1000_pch_cnp)
+ return -EINVAL;
+ flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+ }
+
+ if (flags2 != adapter->flags2)
+ adapter->flags2 = flags2;
+
+ return 0;
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
+ .get_priv_flags = e1000e_get_priv_flags,
+ .set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
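
Aside: once applied, the new flag is runtime-togglable from userspace with stock ethtool (the interface name eth0 is assumed for illustration):

  ethtool --show-priv-flags eth0
  ethtool --set-priv-flags eth0 s0ix-enabled on

e1000e_set_priv_flags() above rejects the "on" request with -EINVAL on MACs older than e1000_pch_cnp, mirroring the probe-time default added in the netdev.c hunk later in this series.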
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9aa6fad8ed47..6fb46682b058 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ struct e1000_adapter *adapter = hw->adapter;
+ bool firmware_bug = false;
+
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 30) {
+ if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
usleep_range(10000, 11000);
}
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ if (firmware_bug)
+ e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ else
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);
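
Timing note: each iteration of the poll loop sleeps usleep_range(10000, 11000), i.e. roughly 10 ms, so the new bail-out at i == 250 allows about 2.5 s (versus ~300 ms at the old i == 30 limit), and the warning threshold of i > 100 corresponds to about 1 s.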
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 128ab6898070..e9b82c209c2d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-struct e1000e_me_supported {
- u16 device_id; /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
- {E1000_DEV_ID_PCH_LPT_I217_LM},
- {E1000_DEV_ID_PCH_LPTLP_I218_LM},
- {E1000_DEV_ID_PCH_I218_LM2},
- {E1000_DEV_ID_PCH_I218_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM},
- {E1000_DEV_ID_PCH_SPT_I219_LM2},
- {E1000_DEV_ID_PCH_LBG_I219_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM4},
- {E1000_DEV_ID_PCH_SPT_I219_LM5},
- {E1000_DEV_ID_PCH_CNP_I219_LM6},
- {E1000_DEV_ID_PCH_CNP_I219_LM7},
- {E1000_DEV_ID_PCH_ICP_I219_LM8},
- {E1000_DEV_ID_PCH_ICP_I219_LM9},
- {E1000_DEV_ID_PCH_CMP_I219_LM10},
- {E1000_DEV_ID_PCH_CMP_I219_LM11},
- {E1000_DEV_ID_PCH_CMP_I219_LM12},
- {E1000_DEV_ID_PCH_TGP_I219_LM13},
- {E1000_DEV_ID_PCH_TGP_I219_LM14},
- {E1000_DEV_ID_PCH_TGP_I219_LM15},
- {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
- struct e1000e_me_supported *id;
-
- for (id = (struct e1000e_me_supported *)me_supported;
- id->device_id; id++)
- if (device_id == id->device_id)
- return true;
-
- return false;
-}
-
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc)
+ if (rc) {
e1000e_pm_thaw(dev);
-
- /* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
- e1000e_s0ix_entry_flow(adapter);
+ } else {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
+ if (hw->mac.type >= e1000_pch_cnp)
+ adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 5c19ff452558..2fb52bd6fc0e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1531,8 +1531,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
.ndo_features_check = fm10k_features_check,
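
Aside: the same two lines disappear from the i40e, ice and ixgbe netdev ops below. With the udp_tunnel_nic infrastructure in this series, the core calls the udp_tunnel_nic port handlers directly for any device that registers a udp_tunnel_nic_info, so drivers no longer need to wire ndo_udp_tunnel_add/ndo_udp_tunnel_del to udp_tunnel_nic_add_port()/udp_tunnel_nic_del_port() by hand.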
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d231a2cdd98f..118473dfdcbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1337686bd099..521ea9df38d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes the PF's VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -12794,8 +12804,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4aca637d4a23..2574e78f7597 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2344,7 +2344,7 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
**/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
@@ -2352,9 +2352,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct xdp_buff xdp;
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+ frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
#endif
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
@@ -2406,12 +2406,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- i40e_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = i40e_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
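
Aside: the same two-helper conversion repeats in the ice, igb, ixgbe, ixgbevf, mvneta and mvpp2 hunks below, so it is worth seeing once in isolation. A condensed sketch of the calling convention (receive-loop names rx_headroom() and run_xdp_prog() are placeholders; the helper signatures are as used in these hunks):

struct xdp_buff xdp;

/* Fields that do not change per frame (truesize on small-page systems,
 * the rxq backpointer) are set once per NAPI poll ... */
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

while (budget--) {
	unsigned int offset = rx_headroom(rx_ring);
	unsigned char *hard_start = page_address(rx_buf->page) +
				    rx_buf->page_offset - offset;

	/* ... while the per-frame pointers (data_hard_start, data,
	 * data_end, and a valid data_meta because the last argument
	 * is true) are derived here instead of assigned by hand. */
	xdp_prepare_buff(&xdp, hard_start, offset, size, true);

	run_xdp_prog(&xdp);
}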
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 729c4f0d5ac5..7efc61aacb0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;
@@ -4046,20 +4046,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
- * If the VF is indeed in reset, the vsi pointer has
- * to show on the newly loaded vsi under pf->vsi[id].
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- if (i > 0)
- vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
break;
- }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4068,6 +4064,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EAGAIN;
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 47eb9c584a12..492ce213208d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -348,12 +348,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
* SBP is *not* set in PRT_SBPVSI (default not set).
*/
skb = i40e_construct_skb_zc(rx_ring, *bi);
- *bi = NULL;
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
break;
}
+ *bi = NULL;
cleaned_count++;
i40e_inc_ntc(rx_ring);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95543dfd4fe7..0a867d64d467 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index ed08ace4f05a..647e7fde11b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -911,7 +911,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
return;
}
- speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
+ speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
if (!speed)
return;
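
Aside: kcalloc(1, size, flags) is exactly kzalloc(size, flags); asking for a one-element zeroed array only obscured the intent and paid for a pointless multiplication-overflow check.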
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 56725356a17b..fa1e128c24ec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -68,7 +68,9 @@
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MIN_MSIX 2
+#define ICE_MIN_LAN_TXRX_MSIX 1
+#define ICE_MIN_LAN_OICR_MSIX 1
+#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9e8e9531cd87..69c113a4de7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3258,8 +3258,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_txq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3268,8 +3268,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_rxq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_rxq);
}
/**
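
Aside: min3() is the linux/minmax.h helper: min3(a, b, c) evaluates to min(a, min(b, c)), and like min() it type-checks its arguments, which is why num_online_cpus() and the capability field are cast to u16 to match pf->num_lan_msix. The effect, sketched with a placeholder hw_queue_cap standing in for the device capability field:

	u16 max_q = min3(pf->num_lan_msix,	 /* MSI-X vectors actually granted */
			 (u16)num_online_cpus(), /* at most one queue per CPU */
			 (u16)hw_queue_cap);	 /* device queue capability */

so the queue count reported to ethtool can no longer exceed the interrupt vectors the PF really obtained.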
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 2d27f66ac853..192729546bbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
sizeof(struct in6_addr));
input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
- input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
+ /* if no protocol requested, use IPPROTO_NONE */
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ input->ip.v6.proto = IPPROTO_NONE;
+ else
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 3df67486d42d..ad9c22a1b97a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
- num_online_cpus());
+ vsi->alloc_txq = min3(pf->num_lan_msix,
+ ice_get_avail_txq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
@@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
- vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
- num_online_cpus());
+ vsi->alloc_rxq = min3(pf->num_lan_msix,
+ ice_get_avail_rxq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
@@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
+ vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
+ max_t(int, vsi->alloc_rxq,
+ vsi->alloc_txq));
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c52b9bb0e3ab..06aa37549c04 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3430,18 +3430,14 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
-#define ICE_MIN_LAN_VECS 2
-#define ICE_MIN_RDMA_VECS 2
-#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
- if (v_actual < ICE_MIN_LAN_VECS) {
+ if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
} else {
- pf->num_lan_msix = ICE_MIN_LAN_VECS;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
}
}
@@ -4884,9 +4880,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
goto err_update_filters;
}
- /* Add filter for new MAC. If filter exists, just return success */
+ /* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
+ /* Although this MAC filter is already present in hardware, it's
+ * possible in some cases (e.g. bonding) that dev_addr was
+ * modified outside of the driver and needs to be restored back
+ * to this value.
+ */
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}
@@ -6790,6 +6792,4 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index a2d0aad8cfdd..2c2de56e2824 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1089,23 +1089,25 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*/
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
struct bpf_prog *xdp_prog = NULL;
struct xdp_buff xdp;
bool failure;
- xdp.rxq = &rx_ring->xdp_rxq;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
+ unsigned int offset = ice_rx_offset(rx_ring);
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
+ unsigned char *hard_start;
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
@@ -1151,10 +1153,9 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
goto construct_skb;
}
- xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
- xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
- xdp.data_meta = xdp.data;
- xdp.data_end = xdp.data + size;
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
@@ -1923,12 +1924,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= ICE_TX_CTX_EIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index bc2f4390b51d..02b12736ea80 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,12 +191,7 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
- /* this is tracked separately to help us debug stack drops */
- rx_ring->rx_stats.gro_dropped++;
- netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
- rx_ring->q_index);
- }
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 03f78fdb0dcd..84d4284b8b32 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5959,15 +5959,6 @@ static int igb_tso(struct igb_ring *tx_ring,
return 1;
}
-static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -5990,10 +5981,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- igb_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
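
Aside: the helper consolidating these four near-identical driver checks (igb here, and igbvf, igc, ixgbe, ixgbevf below) is a one-liner in skbuff.h, reproduced as introduced alongside these patches. The SCTP stack sets csum_not_inet when it hands off a packet that needs CRC32c offload, so drivers no longer re-parse IPv6 extension headers to locate the SCTP header:

static inline bool skb_csum_is_sctp(struct sk_buff *skb)
{
	return skb->csum_not_inet;
}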
@@ -8681,13 +8669,13 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
u16 cleaned_count = igb_desc_unused(rx_ring);
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
+ u32 frame_sz = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+ frame_sz = igb_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
@@ -8715,12 +8703,12 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- igb_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = igb_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 30fdea24e94a..fb3fbcb13331 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2072,15 +2072,6 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
return 1;
}
-static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, __be16 protocol)
{
@@ -2102,10 +2093,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((protocol == htons(ETH_P_IPV6)) &&
- igbvf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 61d331ce38cd..831f2f09de5f 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1675,12 +1675,18 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = hw->phy.addr;
/* advertising link modes */
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
if (hw->mac.autoneg == 1) {
@@ -1792,6 +1798,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
+ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+ * We have to check this and convert it to ADVERTISE_2500_FULL
+ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+ */
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+ advertising |= ADVERTISE_2500_FULL;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
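
Aside: the root cause documented above is worth spelling out. ethtool_convert_link_mode_to_legacy_u32() can only represent the first 32 link-mode bits, and ETHTOOL_LINK_MODE_2500baseT_Full_BIT (bit 47) falls outside that window, so without the explicit test a 2.5 GbE-only advertisement would silently collapse to an empty mask.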
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index afd6a62da29d..43aec42e6d9d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -949,15 +949,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
}
}
-static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -980,10 +971,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if ((first->protocol == htons(ETH_P_IP) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- (first->protocol == htons(ETH_P_IPV6) &&
- igc_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 393d1c2cd853..e08c01525fd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2291,7 +2291,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
const int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbe_adapter *adapter = q_vector->adapter;
#ifdef IXGBE_FCOE
int ddp_bytes;
@@ -2301,12 +2301,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
@@ -2336,12 +2335,12 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbe_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = ixgbe_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
@@ -8040,15 +8039,6 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
-static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
@@ -8074,10 +8064,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbe_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -10278,8 +10265,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4061cd7db5dd..a14e55e7fce8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1121,19 +1121,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbevf_adapter *adapter = q_vector->adapter;
u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
@@ -1161,12 +1160,12 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbevf_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = ixgbevf_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
@@ -3844,15 +3843,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
@@ -3873,10 +3863,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbevf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 41815b609569..7fe15a3286f4 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -94,7 +94,6 @@ config MVPP2
config MVPP2_PTP
bool "Marvell Armada 8K Enable PTP support"
- depends on NETWORK_PHY_TIMESTAMPING
depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
(PTP_1588_CLOCK && MVPP2 = m)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 563ceac3060f..6290bfb6494e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2263,11 +2263,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
-
- xdp->data_hard_start = data;
- xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
- xdp->data_end = xdp->data + data_len;
- xdp_set_data_meta_invalid(xdp);
+ xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
+ data_len, false);
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
@@ -2363,9 +2360,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
u32 desc_status, frame_sz;
struct xdp_buff xdp_buf;
+ xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- xdp_buf.frame_sz = PAGE_SIZE;
- xdp_buf.rxq = &rxq->xdp_rxq;
sinfo.nr_frags = 0;
@@ -4432,7 +4428,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct bpf_prog *old_prog;
if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
@@ -5255,7 +5251,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- return err;
+ goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8867f25afab4..663157dc8062 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -143,7 +143,7 @@ struct mvpp2_cls_c2_entry {
/* Number of per-port dedicated entries in the C2 TCAM */
#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
-/* Each port has oen range per flow type + one entry controling the global RSS
+/* Each port has one range per flow type + one entry controlling the global RSS
* setting and the default rx queue
*/
#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index afdd22827223..143522908477 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
- put_cpu();
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
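The hunk above fixes the TX coalescing setup to program every software thread's copy of the banked per-queue registers, not just the copy belonging to the CPU that happened to run the configuration. mvpp2_thread_write() is, roughly, the existing per-thread register-window helper in this file (a sketch; the exact body may differ):

	static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
	{
		writel(data, priv->swth_base[thread] + offset);
	}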
@@ -3562,17 +3563,17 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
frag_size = bm_pool->frag_size;
if (xdp_prog) {
- xdp.data_hard_start = data;
- xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
- xdp.data_end = xdp.data + rx_bytes;
- xdp.frame_sz = PAGE_SIZE;
+ struct xdp_rxq_info *xdp_rxq;
if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
- xdp.rxq = &rxq->xdp_rxq_short;
+ xdp_rxq = &rxq->xdp_rxq_short;
else
- xdp.rxq = &rxq->xdp_rxq_long;
+ xdp_rxq = &rxq->xdp_rxq_long;
- xdp_set_data_meta_invalid(&xdp);
+ xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
+ xdp_prepare_buff(&xdp, data,
+ MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+ rx_bytes, false);
ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
@@ -5487,7 +5488,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err;
+ int queue, err, val;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5502,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
+ if (mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5869,8 +5882,6 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 5692c6087bbb..6ee53c52627f 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
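For context, the DA matched by this new entry is the IEEE 802.3x reserved flow-control address; a minimal restatement (pause_da is a hypothetical name, ETH_P_PAUSE is the standard 0x8808 ethertype from if_ether.h):

	/* 802.3x PAUSE frames: link-local DA 01-80-C2-00-00-01,
	 * ethertype ETH_P_PAUSE (0x8808); the TCAM entry above drops
	 * them on all ports based on the DA alone. */
	static const u8 pause_da[ETH_ALEN] = {
		0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
	};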
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -882,15 +914,15 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
@@ -899,7 +931,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -967,12 +1000,17 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
return -EINVAL;
}
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1162,6 +1200,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1392,8 +1431,9 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+	/* go to IPv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1597,8 +1637,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+	/* go to IPv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1647,8 +1688,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+	/* Jump to DIP of IPv6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1727,19 +1769,20 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
MVPP2_PRS_RI_L4_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1752,14 +1795,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index e22f6c85d380..4b68dd374733 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7d0f96290943..84a91234ba8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -867,12 +867,14 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->lmac_count = MAX_LMAC_PER_CGX;
for (i = 0; i < cgx->lmac_count; i++) {
- lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
- if (!lmac->name)
- return -ENOMEM;
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
lmac->lmac_id = i;
lmac->cgx = cgx;
@@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
if (err)
- return err;
+ goto err_irq;
/* Enable interrupt */
cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
}
return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
}
static int cgx_lmac_exit(struct cgx *cgx)
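The fix converts mid-loop returns into the usual reverse-order unwind so a partially initialized lmac is not leaked. A generic sketch of the idiom, with hypothetical names but the same shape as the code above:

	/* Free in reverse order of allocation, jumping to the deepest
	 * label reached on failure. */
	static int example_setup(int irq, irq_handler_t handler)
	{
		struct lmac *lmac;
		int err;

		lmac = kzalloc(sizeof(*lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;

		lmac->name = kstrdup("example", GFP_KERNEL);
		if (!lmac->name) {
			err = -ENOMEM;
			goto err_lmac_free;
		}

		err = request_irq(irq, handler, 0, lmac->name, lmac);
		if (err)
			goto err_name_free;

		return 0;

	err_name_free:
		kfree(lmac->name);
	err_lmac_free:
		kfree(lmac);
		return err;
	}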
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index f919283ddc34..89e93eb46ab7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -717,6 +717,8 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
+#define NIX_FLOW_KEY_TYPE_AH BIT(22)
+#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index a1f79445db71..3c640f6aba92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -162,6 +162,11 @@ enum key_fields {
NPC_DIP_IPV4,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
NPC_SPORT_TCP,
NPC_DPORT_TCP,
NPC_SPORT_UDP,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e8fd712860a1..4ef7fc8bbb19 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -646,7 +646,7 @@ setup_vfmsix:
}
/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
- * create a IOMMU mapping for the physcial address configured by
+ * create an IOMMU mapping for the physical address configured by
* firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
@@ -1323,7 +1323,7 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
break;
default:
return rvu_get_blkaddr(rvu, blktype, 0);
- };
+ }
if (is_block_implemented(rvu->hw, blkaddr))
return blkaddr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index d298b9357177..6c6b411e78fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -469,6 +469,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
int rc = 0, i;
u64 cfg;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->hdr.rc = rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d27543c1a166..f60499562d2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1757,6 +1757,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
default:
+ seq_puts(s, "\n");
break;
}
}
@@ -1785,7 +1786,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
} else {
switch (rule->rx_action.op) {
case NIX_RX_ACTIONOP_DROP:
@@ -1806,7 +1807,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bc0e4113370e..10a98bcb7c54 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -52,6 +52,650 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset,
return rvu->irq_allocated[offset];
}
+static void rvu_nix_intr_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
+ "NIX_AF_RVU Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+ nix_event_context->nix_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_gen_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
+ "NIX_AF_GEN Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
+ nix_event_context->nix_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_err_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
+ "NIX_AF_ERR Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+ nix_event_context->nix_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_ras_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
+ "NIX_AF_RAS Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+	intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+ nix_event_context->nix_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
+ "NIX_AF_GEN_INT",
+ rvu_nix_af_rvu_gen_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_rvu_err_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_rvu_ras_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+}
+
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum nix_af_rvu_health health_reporter)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ u64 intr_val;
+ int err;
+
+ nix_event_context = ctx;
+ switch (health_reporter) {
+ case NIX_AF_RVU_INTR:
+ intr_val = nix_event_context->nix_af_rvu_int;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_GEN:
+ intr_val = nix_event_context->nix_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_ERR:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(6)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(5)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_RAS:
+		intr_val = nix_event_context->nix_af_rvu_ras;
+		err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+		if (err)
+			return err;
+		err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+						nix_event_context->nix_af_rvu_ras);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
+}
+
+static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
+}
+
+static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
+}
+
+static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
+}
+
+static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+	if (nix_event_ctx->nix_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_nix_intr);
+RVU_REPORTERS(hw_nix_gen);
+RVU_REPORTERS(hw_nix_err);
+RVU_REPORTERS(hw_nix_ras);
+
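RVU_REPORTERS() stamps out the devlink_health_reporter_ops structure for each dump/recover pair defined above; presumably it expands along these lines (a sketch — the real macro is defined earlier in this file for the NPA reporters):

	#define RVU_REPORTERS(_name) \
	static const struct devlink_health_reporter_ops rvu_ ## _name ## _reporter_ops = { \
		.name = #_name, \
		.recover = rvu_ ## _name ## _recover, \
		.dump = rvu_ ## _name ## _dump, \
	}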
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *rvu_reporters;
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
+ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
+ if (!nix_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->nix_event_ctx = nix_event_context;
+ rvu_reporters->rvu_hw_nix_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ goto err;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+err:
+ rvu_nix_health_reporters_destroy(rvu_dl);
+ return -ENOMEM;
+}
+
+static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_nix_register_reporters(rvu_dl);
+ if (err) {
+		dev_warn(rvu->dev, "Failed to create NIX reporters, err=%d\n",
+ err);
+ return err;
+ }
+ rvu_nix_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *nix_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_reporters = rvu_dl->rvu_nix_health_reporter;
+
+ if (!nix_reporters->rvu_hw_nix_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
+
+ rvu_nix_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
+ kfree(rvu_dl->rvu_nix_health_reporter);
+}
+
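Usage note: once registered, these reporters should appear under devlink's health interface; assuming the #_name stringification sketched above, something like `devlink health show pci/0002:01:00.0 reporter hw_nix_intr` (device address illustrative) lists the reporter state, and `devlink health dump show ... reporter hw_nix_intr` returns the message formatted by rvu_nix_report_show().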
static void rvu_npa_intr_work(struct work_struct *work)
{
struct rvu_npa_health_reporters *rvu_npa_health_reporter;
@@ -698,9 +1342,14 @@ static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
static int rvu_health_reporters_create(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
+ int err;
rvu_dl = rvu->rvu_dl;
- return rvu_npa_health_reporters_create(rvu_dl);
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
}
static void rvu_health_reporters_destroy(struct rvu *rvu)
@@ -712,6 +1361,7 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_dl = rvu->rvu_dl;
rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
}
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
index d7578fa92ac1..471e57dedb20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -41,11 +41,38 @@ struct rvu_npa_health_reporters {
struct work_struct ras_work;
};
+enum nix_af_rvu_health {
+ NIX_AF_RVU_INTR,
+ NIX_AF_RVU_GEN,
+ NIX_AF_RVU_ERR,
+ NIX_AF_RVU_RAS,
+};
+
+struct rvu_nix_event_ctx {
+ u64 nix_af_rvu_int;
+ u64 nix_af_rvu_gen;
+ u64 nix_af_rvu_err;
+ u64 nix_af_rvu_ras;
+};
+
+struct rvu_nix_health_reporters {
+ struct rvu_nix_event_ctx *nix_event_ctx;
+ struct devlink_health_reporter *rvu_hw_nix_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_nix_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_nix_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_nix_ras_reporter;
+ struct work_struct ras_work;
+};
+
struct rvu_devlink {
struct devlink *dl;
struct rvu *rvu;
struct workqueue_struct *devlink_wq;
struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
};
/* Devlink APIs */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a8dfbb6d1774..b54753ef7d94 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2580,6 +2580,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
struct nix_rx_flowkey_alg *field;
struct nix_rx_flowkey_alg tmp;
u32 key_type, valid_key;
+ int l4_key_offset;
if (!alg)
return -EINVAL;
@@ -2712,6 +2713,12 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field_marker = false;
keyoff_marker = false;
}
+
+		/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
+		 * remember the TCP key offset within the 40-byte hash key.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP)
+ l4_key_offset = key_off;
break;
case NIX_FLOW_KEY_TYPE_NVGRE:
field->lid = NPC_LID_LD;
@@ -2783,11 +2790,31 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_mask = 0xF;
field->fn_mask = 1; /* Mask out the first nibble */
break;
+ case NIX_FLOW_KEY_TYPE_AH:
+ case NIX_FLOW_KEY_TYPE_ESP:
+ field->hdr_offset = 0;
+ field->bytesm1 = 7; /* SPI + sequence number */
+ field->ltype_mask = 0xF;
+ field->lid = NPC_LID_LE;
+ field->ltype_match = NPC_LT_LE_ESP;
+ if (key_type == NIX_FLOW_KEY_TYPE_AH) {
+ field->lid = NPC_LID_LD;
+ field->ltype_match = NPC_LT_LD_AH;
+ field->hdr_offset = 4;
+ keyoff_marker = false;
+ }
+ break;
}
field->ena = 1;
/* Found a valid flow key type */
if (valid_key) {
+ /* Use the key offset of TCP/UDP/SCTP fields
+ * for ESP/AH fields.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
+ key_type == NIX_FLOW_KEY_TYPE_AH)
+ key_off = l4_key_offset;
field->key_offset = key_off;
memcpy(&alg[nr_field], field, sizeof(*field));
max_key_off = max(max_key_off, field->bytesm1 + 1);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 14832b66d1fe..4ba9d54ce4e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -26,6 +26,11 @@ static const char * const npc_flow_names[] = {
[NPC_DIP_IPV4] = "ipv4 destination ip",
[NPC_SIP_IPV6] = "ipv6 source ip",
[NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
[NPC_SPORT_TCP] = "tcp source port",
[NPC_DPORT_TCP] = "tcp destination port",
[NPC_SPORT_UDP] = "udp source port",
@@ -212,13 +217,13 @@ static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
return false;
}
-static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
- u8 intf)
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
{
if (!npc_is_field_present(rvu, type, intf) ||
npc_check_overlap(rvu, blkaddr, type, 0, intf))
- return -EOPNOTSUPP;
- return 0;
+ return false;
+ return true;
}
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
@@ -269,7 +274,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
break;
default:
return;
- };
+ }
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -448,14 +453,13 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
u64 tcp_udp_sctp;
- int err, hdr;
+ int hdr;
if (is_npc_intf_tx(intf))
features = &mcam->tx_features;
for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
- err = npc_check_field(rvu, blkaddr, hdr, intf);
- if (!err)
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
*features |= BIT_ULL(hdr);
}
@@ -464,13 +468,26 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
/* for tcp/udp/sctp corresponding layer type should be in the key */
- if (*features & tcp_udp_sctp)
- if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
*features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+ /* for AH, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
/* for vlan corresponding layer type should be in the key */
if (*features & BIT_ULL(NPC_OUTER_VID))
- if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
}
@@ -743,13 +760,13 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
return;
/* For tcp/udp/sctp LTYPE should be present in entry */
- if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
0, ~0ULL, 0, intf);
@@ -758,6 +775,15 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
do { \
if (features & BIT_ULL((field))) { \
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index e2153d47c373..5e15f4fc11e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -74,6 +74,16 @@ enum npa_af_int_vec_e {
NPA_AF_INT_VEC_CNT = 0x5,
};
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 73fb94dd5fbc..5ddedc3b754d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -270,14 +270,17 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
+ struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
+ rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -297,10 +300,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
}
}
- aq->rss.rq = rss->ind_tbl[idx];
+ aq->rss.rq = rss_ctx->ind_tbl[idx];
/* Fill AQ info */
- aq->qidx = idx;
+ aq->qidx = index + idx;
aq->ctype = NIX_AQ_CTYPE_RSS;
aq->op = NIX_AQ_INSTOP_INIT;
}
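With index = rss->rss_size * ctx_id, each RSS context now owns a contiguous slice of the NIX RSS table. A worked sketch of the mapping (otx2_rss_qidx is a hypothetical helper; rss_size of 256, from MAX_RSS_INDIR_TBL_SIZE, is an assumption about this driver's headers):

	/* ctx 0 -> qidx 0..255, ctx 1 -> qidx 256..511, ... */
	static inline int otx2_rss_qidx(int ctx_id, int idx, int rss_size)
	{
		return ctx_id * rss_size + idx;	/* == 'index + idx' above */
	}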
@@ -335,9 +338,10 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(rss->ind_tbl);
+ rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Init RSS key if it is not setup already */
if (!rss->enable)
@@ -345,13 +349,19 @@ int otx2_rss_init(struct otx2_nic *pfvf)
otx2_set_rss_key(pfvf);
if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Default indirection table */
+ /* Set RSS group 0 as default indirection table */
+ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
+ GFP_KERNEL);
+ if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
+ return -ENOMEM;
+
+ rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] =
+ rss_ctx->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
}
- ret = otx2_set_rss_table(pfvf);
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
if (ret)
return ret;
@@ -478,10 +488,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize);
+ buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
+ buf = PTR_ALIGN(buf, OTX2_ALIGN);
iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
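The receive buffer is now over-allocated by OTX2_ALIGN and rounded up, guaranteeing an aligned start without shrinking the usable area. A brief justification as a hypothetical helper (OTX2_ALIGN being the driver's 128-byte alignment is an assumption):

	static inline u8 *example_aligned_frag(struct otx2_pool *pool)
	{
		/* PTR_ALIGN(p, A) advances p by at most A - 1 bytes, so an
		 * extra A bytes always leaves >= rbsize usable bytes after
		 * alignment; A must be a power of two. */
		u8 *buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);

		return buf ? PTR_ALIGN(buf, OTX2_ALIGN) : NULL;
	}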
@@ -986,7 +997,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->sq_cnt = pfvf->hw.tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
- nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
+ nixlf->rss_grps = MAX_RSS_GROUPS;
nixlf->xqe_sz = NIX_XQESZ_W16;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 103430400a8a..143ae04c8ad5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -51,13 +51,17 @@ enum arua_mapped_qtypes {
#define NIX_LF_POISON_VEC 0x82
/* RSS configuration */
+struct otx2_rss_ctx {
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+};
+
struct otx2_rss_info {
u8 enable;
u32 flowkey_cfg;
u16 rss_size;
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
+ struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};
/* NIX (or NPC) RX errors */
@@ -643,7 +647,7 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -684,10 +688,11 @@ int otx2_get_flow(struct otx2_nic *pfvf,
int otx2_get_all_flows(struct otx2_nic *pfvf,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
- struct ethtool_rx_flow_spec *fsp);
+ struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 67171b66a56c..e0199f0e4a6c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -448,10 +448,14 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
- case AH_ESP_V6_FLOW:
+ break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -459,6 +463,7 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
default:
return -EINVAL;
}
+
return 0;
}
@@ -527,6 +532,36 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
return -EINVAL;
}
break;
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
+ NIX_FLOW_KEY_TYPE_AH);
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			/* If VLAN hashing is also requested for ESP, reject
+			 * it: the hardware flow key is limited to 40 bytes.
+ */
+ if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
+ netdev_err(pfvf->netdev,
+ "RSS hash of ESP or AH with VLAN is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
+			/* Disable IPv4 proto hashing since IPv6 SA+DA
+			 * (32 bytes) plus ESP SPI+sequence (8 bytes) already
+			 * consume the hardware's 40-byte flow key limit.
+ */
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case IPV4_FLOW:
case IPV6_FLOW:
rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
@@ -581,7 +616,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
- ret = otx2_add_flow(pfvf, &nfc->fs);
+ ret = otx2_add_flow(pfvf, nfc);
break;
case ETHTOOL_SRXCLSRLDEL:
if (netif_running(dev) && ntuple)
@@ -641,42 +676,50 @@ static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
-
- return pfvf->hw.rss_info.rss_size;
+ return MAX_RSS_INDIR_TBL_SIZE;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
+static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_info *rss;
- int idx;
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- rss = &pfvf->hw.rss_info;
+ otx2_rss_ctx_flow_del(pfvf, ctx_id);
+ kfree(rss->rss_ctx[ctx_id]);
+ rss->rss_ctx[ctx_id] = NULL;
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
- }
+ return 0;
+}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
+static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
+ u32 *rss_context)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u8 ctx;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
+ if (!rss->rss_ctx[ctx])
+ break;
+ }
+ if (ctx == MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
+ if (!rss->rss_ctx[ctx])
+ return -ENOMEM;
+ *rss_context = ctx;
return 0;
}
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+/* RSS context configuration */
+static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc,
+ u32 *rss_context, bool delete)
{
struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int idx;
+ int ret, idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -688,20 +731,85 @@ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+ if (delete)
+ return otx2_rss_ctx_delete(pfvf, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (ret)
+ return ret;
+ }
if (indir) {
+ rss_ctx = rss->rss_ctx[*rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = indir[idx];
}
+ otx2_set_rss_table(pfvf, *rss_context);
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
- otx2_set_rss_key(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc, u32 rss_context)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int idx, rx_queues;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ rx_queues = pfvf->hw.rx_queues;
+ for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
+ indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
+ return 0;
}
+ if (rss_context >= MAX_RSS_GROUPS)
+ return -ENOENT;
+
+ rss_ctx = rss->rss_ctx[rss_context];
+ if (!rss_ctx)
+ return -ENOENT;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss_ctx->ind_tbl[idx];
+ }
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
- otx2_set_rss_table(pfvf);
return 0;
}
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
+ DEFAULT_RSS_CONTEXT_GROUP);
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
+
+ return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
+}
+
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
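Usage note: these .get_rxfh_context/.set_rxfh_context hooks back ethtool's RSS context API. Assuming standard ethtool semantics, `ethtool -X eth0 context new` allocates a group through otx2_rss_ctx_create() (the ETH_RXFH_CONTEXT_ALLOC path), `ethtool -x eth0 context 1` reads it back, and `ethtool -X eth0 context 1 delete` tears it down via otx2_rss_ctx_delete().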
@@ -771,6 +879,8 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -866,6 +976,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index be8ccfce1848..0dbbf38e0597 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2020 Marvell.
*/
@@ -16,6 +16,7 @@ struct otx2_flow {
u32 location;
u16 entry;
bool is_vf;
+ u8 rss_ctx_id;
int vf;
};
@@ -245,6 +246,7 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
if (iter->location == location) {
nfc->fs = iter->flow_spec;
+ nfc->rss_context = iter->rss_ctx_id;
return 0;
}
}
@@ -270,14 +272,16 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
return err;
}
-static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -297,10 +301,16 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4);
}
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (ipv4_l4_mask->ip4src) {
memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
sizeof(pkt->ip4src));
@@ -339,20 +349,60 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ah_esp_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ah_esp_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tos & ah_esp_hdr->tos))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
break;
default:
break;
}
+
+ return 0;
}
-static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -372,10 +422,16 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip6dst));
req->features |= BIT_ULL(NPC_DIP_IPV6);
}
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
sizeof(pkt->ip6src));
@@ -414,10 +470,47 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
+ memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
default:
break;
}
+
+ return 0;
}
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
@@ -428,8 +521,9 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
u32 flow_type;
+ int ret;
- flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
switch (flow_type) {
/* bits not set in mask are don't care */
case ETHER_FLOW:
@@ -455,13 +549,21 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
- otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
case IPV6_USER_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
- otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
default:
return -EOPNOTSUPP;
@@ -532,9 +634,13 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* change to unicast only if action of default entry is not
* requested by user
*/
- if (req->op != NIX_RX_ACTION_DEFAULT)
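+ /* For FLOW_RSS rules the action steers to an RSS group: the
+ * context id selects which group's indirection table spreads
+ * the flow, instead of pinning it to a single queue.
+ */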
+ if (flow->flow_spec.flow_type & FLOW_RSS) {
+ req->op = NIX_RX_ACTIONOP_RSS;
+ req->index = flow->rss_ctx_id;
+ } else {
req->op = NIX_RX_ACTIONOP_UCAST;
- req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ }
vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
if (vf > pci_num_vf(pfvf->pdev)) {
mutex_unlock(&pfvf->mbox.lock);
@@ -555,14 +661,16 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
-int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
- u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
bool new = false;
+ u32 ring;
int err;
+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
@@ -585,6 +693,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
/* struct copy */
flow->flow_spec = *fsp;
+ if (fsp->flow_type & FLOW_RSS)
+ flow->rss_ctx_id = nfc->rss_context;
+
err = otx2_add_flow_msg(pfvf, flow);
if (err) {
if (new)
@@ -647,6 +758,22 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
return 0;
}
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_flow *flow, *tmp;
+ int err;
+
+ list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
+ if (flow->rss_ctx_id != ctx_id)
+ continue;
+ err = otx2_remove_flow(pfvf, flow->location);
+ if (err)
+ netdev_warn(pfvf->netdev,
+ "Can't delete the rule %d associated with this rss group err:%d",
+ flow->location, err);
+ }
+}
+
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 634d60655a74..07ec85aebcca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2020 Marvell International Ltd.
*
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 7d83e1f91ef1..8c2b03151736 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -580,16 +580,12 @@ int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
}
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
unsigned long flags)
{
struct prestera_bridge_port *br_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
@@ -608,35 +604,26 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
struct prestera_switch *sw = port->sw;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
- ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
}
static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
bool vlan_enabled)
{
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge = prestera_bridge_by_dev(sw->swdev, dev);
if (WARN_ON(!bridge))
return -EINVAL;
@@ -665,19 +652,15 @@ static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
return 0;
}
-static int presterar_port_attr_stp_state_set(struct prestera_port *port,
- struct switchdev_trans *trans,
- struct net_device *dev,
- u8 state)
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct net_device *dev,
+ u8 state)
{
struct prestera_bridge_port *br_port;
struct prestera_bridge_vlan *br_vlan;
int err;
u16 vid;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
@@ -712,17 +695,15 @@ err_port_stp_set:
}
static int prestera_port_obj_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct prestera_port *port = netdev_priv(dev);
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = presterar_port_attr_stp_state_set(port, trans,
- attr->orig_dev,
- attr->u.stp_state);
+ err = prestera_port_attr_stp_state_set(port, attr->orig_dev,
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags &
@@ -730,17 +711,15 @@ static int prestera_port_obj_attr_set(struct net_device *dev,
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = prestera_port_attr_br_flags_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_flags_set(port, attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = prestera_port_attr_br_ageing_set(port, trans,
+ err = prestera_port_attr_br_ageing_set(port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = prestera_port_attr_br_vlan_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
default:
@@ -1020,7 +999,6 @@ prestera_bridge_port_vlan_del(struct prestera_port *port,
static int prestera_port_vlans_add(struct prestera_port *port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1029,14 +1007,10 @@ static int prestera_port_vlans_add(struct prestera_port *port,
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- u16 vid;
if (netif_is_bridge_master(dev))
return 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1045,22 +1019,13 @@ static int prestera_port_vlans_add(struct prestera_port *port,
if (!bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = prestera_bridge_port_vlan_add(port, br_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return prestera_bridge_port_vlan_add(port, br_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
static int prestera_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
@@ -1069,7 +1034,7 @@ static int prestera_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- return prestera_port_vlans_add(port, vlan, trans, extack);
+ return prestera_port_vlans_add(port, vlan, extack);
default:
return -EOPNOTSUPP;
}
@@ -1081,7 +1046,6 @@ static int prestera_port_vlans_del(struct prestera_port *port,
struct net_device *dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
- u16 vid;
if (netif_is_bridge_master(dev))
return -EOPNOTSUPP;
@@ -1093,8 +1057,7 @@ static int prestera_port_vlans_del(struct prestera_port *port,
if (!br_port->bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- prestera_bridge_port_vlan_del(port, br_port, vid);
+ prestera_bridge_port_vlan_del(port, br_port, vlan->vid);
return 0;
}
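
With the switchdev_trans prepare/commit split removed above, each handler now
validates and applies in a single pass. A minimal sketch of the resulting shape
(illustrative names, not prestera code):

	/* Illustrative: single-phase switchdev attribute handler. */
	static int example_attr_ageing_set(struct example_sw *sw, u32 ageing_ms)
	{
		if (ageing_ms < EXAMPLE_MIN_AGEING_MS ||
		    ageing_ms > EXAMPLE_MAX_AGEING_MS)
			return -ERANGE;				/* validate... */
		return example_hw_ageing_set(sw, ageing_ms);	/* ...then commit */
	}
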
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6d2d60675ffd..01d3ee4b5829 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -353,7 +353,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
/* Setup gmac */
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
- mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
/* Only update control register when needed! */
@@ -759,8 +759,8 @@ static void mtk_get_stats64(struct net_device *dev,
static inline int mtk_max_frag_size(int mtu)
{
/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
- if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
- mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
+ mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -771,7 +771,7 @@ static inline int mtk_max_buf_size(int frag_size)
int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
+ WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
return buf_size;
}
@@ -2499,6 +2499,35 @@ static void mtk_uninit(struct net_device *dev)
mtk_rx_irq_disable(eth, ~0);
}
+static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int length = new_mtu + MTK_RX_ETH_HLEN;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+
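+ /* Example: the default 1500-byte MTU gives length = 1500 +
+ * MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN = 18) = 1518, so the
+ * MAC is programmed for 1518-byte frames; larger MTUs step up
+ * through 1536 and 1552 to the 2048-byte ceiling.
+ */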
+ if (length <= 1518)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+ else if (length <= 1536)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+ else if (length <= 1552)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+ else
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -2795,6 +2824,7 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_set_mac_address = mtk_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mtk_do_ioctl,
+ .ndo_change_mtu = mtk_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
.ndo_fix_features = mtk_fix_features,
@@ -2896,7 +2926,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
- eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ else
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 454cfcd465fd..fd3cec8f06ba 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -17,12 +17,13 @@
#include <linux/phylink.h>
#define MTK_QDMA_PAGE_SIZE 2048
-#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_DMA_SIZE 256
#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
@@ -320,7 +321,12 @@
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
-#define MAC_MCR_MAX_RX_1536 BIT(24)
+#define MAC_MCR_MAX_RX_MASK GENMASK(25, 24)
+#define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24))
+#define MAC_MCR_MAX_RX_1518 0x0
+#define MAC_MCR_MAX_RX_1536 0x1
+#define MAC_MCR_MAX_RX_1552 0x2
+#define MAC_MCR_MAX_RX_2048 0x3
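+/* e.g. MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536) places 0x1 into bits 25:24 */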
#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 32aad4d32b88..51b9700fce83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2839,8 +2839,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
@@ -2873,8 +2871,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c1c9118a66c9..e35e4d7ef4d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -682,8 +682,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
- xdp.rxq = &ring->xdp_rxq;
- xdp.frame_sz = priv->frag_info[0].frag_stride;
+ xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -777,10 +776,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data_hard_start = va - frags[0].page_offset;
- xdp.data = va;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + length;
+ xdp_prepare_buff(&xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
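
For reference, the two helpers adopted in this hunk come from include/net/xdp.h
and replace the open-coded xdp_buff field setup; a minimal usage sketch, where
frame_size, hard_start, headroom and data_len are placeholders:

	struct xdp_buff xdp;

	/* Once per RX cycle: record the frame size and RX queue info. */
	xdp_init_buff(&xdp, frame_size, &ring->xdp_rxq);
	/* Per packet: data begins headroom bytes past hard_start; false
	 * marks the packet metadata area as absent/invalid.
	 */
	xdp_prepare_buff(&xdp, hard_start, headroom, data_len, false);
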
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6e4d7bb7fea2..ad45d20f9d44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -203,3 +203,22 @@ config MLX5_SW_STEERING
default y
help
Build support for software-managed steering in the NIC.
+
+config MLX5_SF
+ bool "Mellanox Technologies subfunction device support using auxiliary device"
+ depends on MLX5_CORE && MLX5_CORE_EN
+ default n
+ help
+ Build support for subfunction devices in the NIC. A Mellanox
+ subfunction device can support RDMA, netdevice and vdpa devices.
+ It is similar to an SR-IOV VF but does not require SR-IOV support.
+
+config MLX5_SF_MANAGER
+ bool
+ depends on MLX5_SF && MLX5_ESWITCH
+ default y
+ help
+ Build support for subfunction ports in the NIC. A Mellanox subfunction
+ port is managed through devlink. A subfunction supports RDMA, netdevice
+ and vdpa devices. It is similar to an SR-IOV VF but does not require
+ SR-IOV support.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 77961643d5a9..f61ee7110243 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,8 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o fw_reset.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
+ fw_reset.o qos.o
#
# Netdev basic
@@ -25,7 +26,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
+ en/qos.o en/trap.o
#
# Netdev extra
@@ -83,5 +85,15 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_matcher.o steering/dr_rule.o \
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
+ steering/dr_ste_v0.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
+#
+# SF device
+#
+mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o
+
+#
+# SF manager
+#
+mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 50c7b9ee80c3..e8cecd50558d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -333,6 +333,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ case MLX5_CMD_OP_DEALLOC_SF:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -464,6 +465,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_ALLOC_MEMIC:
case MLX5_CMD_OP_MODIFY_XRQ:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_QUERY_VHCA_STATE:
+ case MLX5_CMD_OP_MODIFY_VHCA_STATE:
+ case MLX5_CMD_OP_ALLOC_SF:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -657,6 +661,10 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(ALLOC_SF);
+ MLX5_COMMAND_STR_CASE(DEALLOC_SF);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3261d0dc1104..aa76a6e0dae8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,6 +7,8 @@
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
+#include "sf/dev/dev.h"
+#include "sf/sf.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -127,6 +129,17 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool sf_dev_allocated;
+
+ sf_dev_allocated = mlx5_sf_dev_allocated(dev);
+ if (sf_dev_allocated) {
+ /* Reload results in deleting the SF device, which further results in
+ * unregistering the devlink instance while holding devlink_mutex.
+ * Hence, do not support reload.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
+ return -EOPNOTSUPP;
+ }
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
@@ -168,6 +181,91 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
return 0;
}
+static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
+{
+ struct mlx5_devlink_trap *dl_trap;
+
+ list_for_each_entry(dl_trap, &dev->priv.traps, list)
+ if (dl_trap->trap.id == trap_id)
+ return dl_trap;
+
+ return NULL;
+}
+
+static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL);
+ if (!dl_trap)
+ return -ENOMEM;
+
+ dl_trap->trap.id = trap->id;
+ dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP;
+ dl_trap->item = trap_ctx;
+
+ if (mlx5_find_trap_by_id(dev, trap->id)) {
+ kfree(dl_trap);
+ mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id);
+ return -EEXIST;
+ }
+
+ list_add_tail(&dl_trap->list, &dev->priv.traps);
+ return 0;
+}
+
+static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id);
+ return;
+ }
+ list_del(&dl_trap->list);
+ kfree(dl_trap);
+}
+
+static int mlx5_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum devlink_trap_action action_orig;
+ struct mlx5_devlink_trap *dl_trap;
+ int err = 0;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (action == dl_trap->trap.action)
+ goto out;
+
+ action_orig = dl_trap->trap.action;
+ dl_trap->trap.action = action;
+ err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
+ &dl_trap->trap);
+ if (err)
+ dl_trap->trap.action = action_orig;
+out:
+ return err;
+}
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -179,6 +277,12 @@ static const struct devlink_ops mlx5_devlink_ops = {
.port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
.port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ .port_new = mlx5_devlink_sf_port_new,
+ .port_del = mlx5_devlink_sf_port_del,
+ .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
+ .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
+#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -186,8 +290,59 @@ static const struct devlink_ops mlx5_devlink_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = mlx5_devlink_reload_down,
.reload_up = mlx5_devlink_reload_up,
+ .trap_init = mlx5_devlink_trap_init,
+ .trap_fini = mlx5_devlink_trap_fini,
+ .trap_action_set = mlx5_devlink_trap_action_set,
};
+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+ struct devlink_port *dl_port)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
+ return;
+ }
+
+ if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
+ mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
+ dl_trap->trap.action);
+ return;
+ }
+ devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
+}
+
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devlink_trap *dl_trap;
+ int count = 0;
+
+ list_for_each_entry(dl_trap, &dev->priv.traps, list)
+ if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
+ count++;
+
+ return count;
+}
+
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+ enum devlink_trap_action *action)
+{
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
+ trap_id);
+ return -EINVAL;
+ }
+
+ *action = dl_trap->trap.action;
+ return 0;
+}
+
struct devlink *mlx5_devlink_alloc(void)
{
return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev));
@@ -358,6 +513,49 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
#endif
}
+#define MLX5_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)
+
+static const struct devlink_trap mlx5_traps_arr[] = {
+ MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
+};
+
+static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+};
+
+static int mlx5_devlink_traps_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *core_dev = devlink_priv(devlink);
+ int err;
+
+ err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+ if (err)
+ return err;
+
+ err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
+ if (err)
+ goto err_trap_group;
+ return 0;
+
+err_trap_group:
+ devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+ return err;
+}
+
+static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+{
+ devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+}
+
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
{
int err;
@@ -372,8 +570,16 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
goto params_reg_err;
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
+
+ err = mlx5_devlink_traps_register(devlink);
+ if (err)
+ goto traps_reg_err;
+
return 0;
+traps_reg_err:
+ devlink_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
params_reg_err:
devlink_unregister(devlink);
return err;
@@ -381,6 +587,7 @@ params_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ mlx5_devlink_traps_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
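
As a usage sketch for the trap plumbing added above (the reporting call site
lands in the mlx5e RX path elsewhere in this series; mdev, skb and dl_port are
placeholders here), a driver that received a packet the hardware trapped rather
than dropped would hand it to devlink like so:

	/* Illustrative: report a trapped packet; devlink drops it or
	 * delivers it to listeners depending on the trap action.
	 */
	mlx5_devlink_trap_report(mdev, DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
				 skb, dl_port);
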
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index f0de327a59be..eff107dad922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -12,6 +12,24 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
};
+struct mlx5_trap_ctx {
+ int id;
+ int action;
+};
+
+struct mlx5_devlink_trap {
+ struct mlx5_trap_ctx trap;
+ void *item;
+ struct list_head list;
+};
+
+struct mlx5_core_dev;
+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+ struct devlink_port *dl_port);
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+ enum devlink_trap_action *action);
+
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 055baf3b6cb1..39f389cc40fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -55,6 +55,7 @@
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
+#include "en/qos.h"
#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;
@@ -161,6 +162,9 @@ do { \
##__VA_ARGS__); \
} while (0)
+#define mlx5e_state_dereference(priv, p) \
+ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
+
enum mlx5e_rq_group {
MLX5E_RQ_GROUP_REGULAR,
MLX5E_RQ_GROUP_XSK,
@@ -560,6 +564,7 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
+void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT,
@@ -663,11 +668,13 @@ struct mlx5e_channel {
struct mlx5e_xdpsq rq_xdpsq;
struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
+ struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
bool xdp;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
__be32 mkey_be;
+ u16 qos_sqs_size;
u8 num_tc;
u8 lag_port;
@@ -756,6 +763,8 @@ struct mlx5e_modify_sq_param {
int next_state;
int rl_update;
int rl_index;
+ bool qos_update;
+ u16 qos_queue_group_id;
};
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
@@ -788,10 +797,22 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+ struct mlx5e_sq_stats **qos_sq_stats;
+ u16 max_qos_sqs;
+ u16 maj_id;
+ u16 defcls;
+};
+
+struct mlx5e_trap;
+
struct mlx5e_priv {
/* priv data path fields - start */
/* +1 for port ptp ts */
- struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
+ MLX5E_QOS_MAX_LEAF_NODES];
int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -826,8 +847,10 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
+ struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_channel_stats trap_stats;
struct mlx5e_port_ptp_stats port_ptp_stats;
u16 max_nch;
u8 max_opened_tc;
@@ -836,6 +859,7 @@ struct mlx5e_priv {
u16 q_counter;
u16 drop_rq_q_counter;
struct notifier_block events_nb;
+ struct notifier_block blocking_events_nb;
int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
@@ -859,6 +883,7 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
+ struct mlx5e_htb htb;
};
struct mlx5e_rx_handlers {
@@ -942,6 +967,8 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -986,6 +1013,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
void *context);
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
@@ -1010,6 +1038,9 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+ struct mlx5e_params *params, struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
@@ -1020,8 +1051,10 @@ struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
+ u16 qos_queue_group_id,
u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
@@ -1047,6 +1080,8 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
+void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 5749557749b0..a16297e7e2ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -44,6 +44,11 @@ struct mlx5e_l2_rule {
#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+struct mlx5e_promisc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *rule;
+};
+
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
@@ -53,6 +58,7 @@ struct mlx5e_vlan_table {
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
+ struct mlx5_flow_handle *trap_rule;
bool cvlan_filter_disabled;
};
@@ -62,7 +68,7 @@ struct mlx5e_l2_table {
struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
struct mlx5e_l2_rule broadcast;
struct mlx5e_l2_rule allmulti;
- struct mlx5e_l2_rule promisc;
+ struct mlx5_flow_handle *trap_rule;
bool broadcast_enabled;
bool allmulti_enabled;
bool promisc_enabled;
@@ -126,7 +132,8 @@ struct mlx5e_ttc_table {
/* NIC prio FTS */
enum {
- MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_PROMISC_FT_LEVEL,
+ MLX5E_VLAN_FT_LEVEL,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
@@ -241,6 +248,7 @@ struct mlx5e_flow_steering {
struct mlx5e_ethtool_steering ethtool;
#endif
struct mlx5e_tc_table tc;
+ struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
@@ -288,6 +296,10 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
+int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
+int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 718f8c0a4f6b..84e501e057b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
if (err)
- return err;
+ goto free_page;
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 807147d97a0f..ea2cfb04b31a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -118,6 +118,8 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
struct mlx5e_sq_param *param);
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param);
void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 2a2bac30daaa..eeddd1137dda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -261,7 +261,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
csp.min_inline_mode = txqsq->min_inline_mode;
csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
- err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
if (err)
goto err_free_txqsq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
new file mode 100644
index 000000000000..12d7ad061237
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en.h"
+#include "params.h"
+#include "../qos.h"
+
+#define BYTES_IN_MBIT 125000
+
+int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
+{
+ return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
+}
+
+int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
+{
+ int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
+
+ return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
+}
+
+/* Software representation of the QoS tree (internal to this file) */
+
+static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
+{
+ int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
+ int res;
+
+ WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+ res = find_first_zero_bit(priv->htb.qos_used_qids, size);
+
+ return res == size ? -ENOSPC : res;
+}
+
+struct mlx5e_qos_node {
+ struct hlist_node hnode;
+ struct rcu_head rcu;
+ struct mlx5e_qos_node *parent;
+ u64 rate;
+ u32 bw_share;
+ u32 max_average_bw;
+ u32 hw_id;
+ u32 classid; /* 16-bit, except root. */
+ u16 qid;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+static struct mlx5e_qos_node *
+mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
+ struct mlx5e_qos_node *parent)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+
+ node->qid = qid;
+ __set_bit(qid, priv->htb.qos_used_qids);
+
+ node->classid = classid;
+ hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
+
+ mlx5e_update_tx_netdev_queues(priv);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->qid = MLX5E_QOS_QID_INNER;
+ node->classid = MLX5E_HTB_CLASSID_ROOT;
+ hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+ hash_del_rcu(&node->hnode);
+ if (node->qid != MLX5E_QOS_QID_INNER) {
+ __clear_bit(node->qid, priv->htb.qos_used_qids);
+ mlx5e_update_tx_netdev_queues(priv);
+ }
+ kfree_rcu(node, rcu);
+}
+
+/* TX datapath API */
+
+static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+{
+ /* These channel params are safe to access from the datapath, because:
+ * 1. This function is called only after checking priv->htb.maj_id != 0,
+ * and the number of queues can't change while HTB offload is active.
+ * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+ * mlx5e_select_queue to finish while holding priv->state_lock,
+ * preventing other code from changing the number of queues.
+ */
+ bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);
+
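+ /* Example: 8 channels, 2 TCs, no PTP: QoS qid 3 maps to netdev
+ * txq (8 + 0) * 2 + 3 = 19, i.e. QoS queues follow all regular
+ * TC queues.
+ */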
+ return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid;
+}
+
+int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
+{
+ struct mlx5e_qos_node *node;
+ u16 qid;
+ int res;
+
+ rcu_read_lock();
+
+ node = mlx5e_sw_node_find_rcu(priv, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == MLX5E_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = mlx5e_qid_from_qos(&priv->channels, qid);
+
+out:
+ rcu_read_unlock();
+ return res;
+}
+
+static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
+{
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_channel *c;
+ int ix;
+
+ ix = qid % params->num_channels;
+ qid /= params->num_channels;
+ c = priv->channels.c[ix];
+
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ return mlx5e_state_dereference(priv, qos_sqs[qid]);
+}
+
+/* SQ lifecycle */
+
+static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ struct mlx5e_qos_node *node)
+{
+ struct mlx5e_create_cq_param ccp = {};
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_sq_param param_sq;
+ struct mlx5e_cq_param param_cq;
+ int txq_ix, ix, qid, err = 0;
+ struct mlx5e_params *params;
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+
+ params = &chs->params;
+
+ txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+
+ WARN_ON(node->qid > priv->htb.max_qos_sqs);
+ if (node->qid == priv->htb.max_qos_sqs) {
+ struct mlx5e_sq_stats *stats, **stats_list = NULL;
+
+ if (priv->htb.max_qos_sqs == 0) {
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list),
+ GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+ }
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats) {
+ kvfree(stats_list);
+ return -ENOMEM;
+ }
+ if (stats_list)
+ WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
+ WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
+ /* Order max_qos_sqs increment after writing the array pointer.
+ * Pairs with smp_load_acquire in en_stats.c.
+ */
+ smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+ }
+
+ ix = node->qid % params->num_channels;
+ qid = node->qid / params->num_channels;
+ c = chs->c[ix];
+
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+
+ if (!sq)
+ return -ENOMEM;
+
+ mlx5e_build_create_cq_param(&ccp, c);
+
+ memset(&param_sq, 0, sizeof(param_sq));
+ memset(&param_cq, 0, sizeof(param_cq));
+ mlx5e_build_sq_param(priv, params, &param_sq);
+ mlx5e_build_tx_cq_param(priv, params, &param_cq);
+ err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
+ if (err)
+ goto err_free_sq;
+ err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
+ &param_sq, sq, 0, node->hw_id, node->qid);
+ if (err)
+ goto err_close_cq;
+
+ rcu_assign_pointer(qos_sqs[qid], sq);
+
+ return 0;
+
+err_close_cq:
+ mlx5e_close_cq(&sq->cq);
+err_free_sq:
+ kfree(sq);
+ return err;
+}
+
+static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_get_qos_sq(priv, node->qid);
+
+ WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
+
+ qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+ mlx5e_activate_txqsq(sq);
+}
+
+static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_get_qos_sq(priv, qid);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ return;
+
+ qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ mlx5e_deactivate_txqsq(sq);
+
+ /* The queue is disabled, no synchronization with datapath is needed. */
+ priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+}
+
+static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_params *params;
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+ int ix;
+
+ params = &priv->channels.params;
+
+ ix = qid % params->num_channels;
+ qid /= params->num_channels;
+ c = priv->channels.c[ix];
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ return;
+
+ synchronize_rcu(); /* Sync with NAPI. */
+
+ mlx5e_close_txqsq(sq);
+ mlx5e_close_cq(&sq->cq);
+ kfree(sq);
+}
+
+void mlx5e_qos_close_queues(struct mlx5e_channel *c)
+{
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ int i;
+
+ qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
+ if (!qos_sqs)
+ return;
+ synchronize_rcu(); /* Sync with NAPI. */
+
+ for (i = 0; i < c->qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ continue;
+
+ mlx5e_close_txqsq(sq);
+ mlx5e_close_cq(&sq->cq);
+ kfree(sq);
+ }
+
+ kvfree(qos_sqs);
+}
+
+static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_qos_close_queues(chs->c[i]);
+}
+
+static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ u16 qos_sqs_size;
+ int i;
+
+ qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);
+
+ for (i = 0; i < chs->num; i++) {
+ struct mlx5e_txqsq **sqs;
+
+ sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
+ if (!sqs)
+ goto err_free;
+
+ WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
+ smp_wmb(); /* Pairs with mlx5e_napi_poll. */
+ rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
+ }
+
+ return 0;
+
+err_free:
+ while (--i >= 0) {
+ struct mlx5e_txqsq **sqs;
+
+ sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
+ lockdep_is_held(&priv->state_lock));
+
+ synchronize_rcu(); /* Sync with NAPI. */
+ kvfree(sqs);
+ }
+ return -ENOMEM;
+}
+
+int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt, err;
+
+ if (!priv->htb.maj_id)
+ return 0;
+
+ err = mlx5e_qos_alloc_queues(priv, chs);
+ if (err)
+ return err;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ err = mlx5e_open_qos_sq(priv, chs, node);
+ if (err) {
+ mlx5e_qos_close_all_queues(chs);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ mlx5e_activate_qos_sq(priv, node);
+ }
+}
+
+void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
+{
+ struct mlx5e_params *params = &c->priv->channels.params;
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ int i;
+
+ qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
+ if (!qos_sqs)
+ return;
+
+ for (i = 0; i < c->qos_sqs_size; i++) {
+ u16 qid = params->num_channels * i + c->ix;
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ continue;
+
+ qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ mlx5e_deactivate_txqsq(sq);
+
+ /* The queue is disabled, no synchronization with datapath is needed. */
+ c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+ }
+}
+
+static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_qos_deactivate_queues(chs->c[i]);
+}
+
+/* HTB API */
+
+int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *root;
+ bool opened;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
+ }
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+ if (err)
+ return err;
+ }
+
+ root = mlx5e_sw_node_create_root(priv);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto err_free_queues;
+ }
+
+ err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+ goto err_sw_node_delete;
+ }
+
+ WRITE_ONCE(priv->htb.defcls, htb_defcls);
+ /* Order maj_id after defcls - pairs with
+ * mlx5e_select_queue/mlx5e_select_htb_queues.
+ */
+ smp_store_release(&priv->htb.maj_id, htb_maj_id);
+
+ return 0;
+
+err_sw_node_delete:
+ mlx5e_sw_node_delete(priv, root);
+
+err_free_queues:
+ if (opened)
+ mlx5e_qos_close_all_queues(&priv->channels);
+ return err;
+}
+
+int mlx5e_htb_root_del(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *root;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
+
+ WRITE_ONCE(priv->htb.maj_id, 0);
+ synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
+
+ root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
+ if (!root) {
+ qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
+ return -ENOENT;
+ }
+ err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
+ if (err)
+ qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
+ root->hw_id, err);
+ mlx5e_sw_node_delete(priv, root);
+
+ mlx5e_qos_deactivate_all_queues(&priv->channels);
+ mlx5e_qos_close_all_queues(&priv->channels);
+
+ return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
+ struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+ u64 share = 0;
+
+ while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+ parent = parent->parent;
+
+ if (parent->max_average_bw)
+ share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+ parent->max_average_bw);
+ else
+ share = 101;
+
+ *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+ qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+ rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+ return 0;
+}
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
+{
+ *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+
+ qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+}
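A worked example of the two conversions (assuming, per the surrounding code, that rate and ceil arrive in bytes per second and BYTES_IN_MBIT equals 1000000 / 8 = 125000):

	/* ceil = 12500000 B/s -> max_average_bw = 12500000 / 125000 = 100 (Mbit/s)
	 * rate = 6250000 B/s under that parent ceil:
	 *   share = (6250000 * 100 / 125000) / 100 = 50 -> bw_share = 50
	 * Clamping: share == 0 becomes 1 (minimal weight), share > 100 becomes 0,
	 * so the 101 sentinel (no ancestor has a ceil) maps to bw_share = 0.
	 */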
+
+int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ int qid;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+ classid, parent_classid, rate, ceil);
+
+ qid = mlx5e_find_unused_qos_qid(priv);
+ if (qid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
+ return qid;
+ }
+
+ parent = mlx5e_sw_node_find(priv, parent_classid);
+ if (!parent)
+ return -EINVAL;
+
+ node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ mlx5e_sw_node_delete(priv, node);
+ return err;
+ }
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *child;
+ int err, tmp_err;
+ u32 new_hw_id;
+ u16 qid;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+ qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ /* Intentionally reuse the qid for the upcoming first child. */
+ child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto err_destroy_hw_node;
+ }
+
+ child->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
+ child->max_average_bw, &child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ goto err_delete_sw_node;
+ }
+
+ /* No fail point. */
+
+ qid = node->qid;
+ /* Pairs with mlx5e_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, child);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, child);
+ }
+ }
+
+ return 0;
+
+err_delete_sw_node:
+ child->qid = MLX5E_QOS_QID_INNER;
+ mlx5e_sw_node_delete(priv, child);
+
+err_destroy_hw_node:
+ tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
+ if (tmp_err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+ new_hw_id, classid, tmp_err);
+ return err;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
+ if (node->qid == qid)
+ break;
+
+ return node;
+}
+
+static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+{
+ qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
+ netdev_tx_reset_queue(txq);
+ netif_tx_start_queue(txq);
+}
+
+static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+{
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+ if (!qdisc)
+ return;
+
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
+ u16 *new_qid, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node;
+ struct netdev_queue *txq;
+ u16 qid, moved_qid;
+ bool opened;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);
+
+ *old_qid = *new_qid = 0;
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ /* Store qid for reuse. */
+ qid = node->qid;
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ txq = netdev_get_tx_queue(priv->netdev,
+ mlx5e_qid_from_qos(&priv->channels, qid));
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ mlx5e_sw_node_delete(priv, node);
+
+ moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
+
+ if (moved_qid == 0) {
+ /* The last QoS SQ was just destroyed. */
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+ moved_qid--;
+
+ if (moved_qid < qid) {
+ /* The highest QoS SQ was just destroyed. */
+ WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
+ qid, moved_qid);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+
+ WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+ qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+ node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
+ WARN(!node, "Could not find a node with qid %u to move to queue %u",
+ moved_qid, qid);
+
+ /* Stop traffic to the old queue. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+ __clear_bit(moved_qid, priv->htb.qos_used_qids);
+
+ if (opened) {
+ txq = netdev_get_tx_queue(priv->netdev,
+ mlx5e_qid_from_qos(&priv->channels, moved_qid));
+ mlx5e_deactivate_qos_sq(priv, moved_qid);
+ mlx5e_close_qos_sq(priv, moved_qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(priv->netdev, moved_qid);
+
+ __set_bit(qid, priv->htb.qos_used_qids);
+ WRITE_ONCE(node->qid, qid);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+ node->classid, moved_qid, qid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ mlx5e_update_tx_netdev_queues(priv);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+ *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
+ *new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
+ return 0;
+}
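In short, deletion keeps the qid space dense: assuming mlx5e_qos_cur_leaf_nodes() returns one past the highest qid in use, removing a class from the middle moves the highest-numbered SQ into the freed slot. For example:

	/* qids in use {0, 1, 2, 3}; the class with qid 1 is deleted.
	 * moved_qid = 4 - 1 = 3 > qid = 1, so the node with qid 3 is moved:
	 * its SQ is closed, its per-queue qdisc is reset, and it is reopened
	 * as qid 1; qids in use become {0, 1, 2}. old_qid and new_qid report
	 * the corresponding txq indices back to the HTB core.
	 */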
+
+int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ u32 old_hw_id, new_hw_id;
+ int err, saved_err = 0;
+ u16 qid;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+ force ? "_FORCE" : "", classid);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
+ node->parent->bw_share,
+ node->parent->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ if (!force)
+ return err;
+ saved_err = err;
+ }
+
+ /* Store qid for reuse and prevent clearing the bit. */
+ qid = node->qid;
+ /* Pairs with mlx5e_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(priv->netdev, qid);
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ parent = node->parent;
+ mlx5e_sw_node_delete(priv, node);
+
+ node = parent;
+ WRITE_ONCE(node->qid, qid);
+
+ /* Early return on error in force mode. Parent will still be an inner
+ * node to be deleted by a following delete operation.
+ */
+ if (saved_err)
+ return saved_err;
+
+ old_hw_id = node->hw_id;
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ return 0;
+}
+
+static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *child;
+ int err = 0;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
+ u32 old_bw_share = child->bw_share;
+ int err_one;
+
+ if (child->parent != node)
+ continue;
+
+ mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
+ if (child->bw_share == old_bw_share)
+ continue;
+
+ err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+ qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+ child->classid, err);
+ }
+ }
+
+ return err;
+}
+
+int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ u32 bw_share, max_average_bw;
+ struct mlx5e_qos_node *node;
+ bool ceil_changed = false;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+ classid, rate, ceil);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
+
+ err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+ qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ if (max_average_bw != node->max_average_bw)
+ ceil_changed = true;
+
+ node->bw_share = bw_share;
+ node->max_average_bw = max_average_bw;
+
+ if (ceil_changed)
+ err = mlx5e_qos_update_children(priv, node, extack);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
new file mode 100644
index 000000000000..5af7991fcd19
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_EN_QOS_H
+#define __MLX5E_EN_QOS_H
+
+#include <linux/mlx5/driver.h>
+
+#define MLX5E_QOS_MAX_LEAF_NODES 256
+
+struct mlx5e_priv;
+struct mlx5e_channels;
+struct mlx5e_channel;
+
+int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
+int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
+
+/* TX datapath API */
+int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
+struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
+
+/* SQ lifecycle */
+int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
+void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+
+/* HTB API */
+int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_root_del(struct mlx5e_priv *priv);
+int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
+ u16 *new_qid, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index d29af7b9c695..76177f7c5ec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!reg_c0)
return true;
+ /* If reg_c0 is not equal to the default flow tag, then skb->mark
+ * is not supported and must be reset back to 0.
+ */
+ skb->mark = 0;
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index e521254d886e..e417904ae17f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -27,6 +27,7 @@
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
+#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -118,16 +119,17 @@ struct mlx5_ct_tuple {
u16 zone;
};
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
+ bool is_shared;
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -166,6 +168,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
.min_size = 16 * 1024,
};
+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
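+ /* rhashtable chains end in a "nulls" marker rather than a plain NULL,
+ * so next is non-NULL for any entry that was inserted into
+ * ct_tuples_nat_ht, while zero-initialized entries that were never
+ * inserted keep next == NULL.
+ */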
+ return !!(entry->tuple_nat_node.next);
+}
+
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
@@ -394,13 +402,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
- if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ if (entry->counter->is_shared &&
+ !refcount_dec_and_test(&entry->counter->refcount))
return;
- mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
- kfree(entry->shared_counter);
+ mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+ kfree(entry->counter);
}
static void
@@ -633,6 +642,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
}
ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
+ ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
ct_state,
meta->ct_metadata.mark,
@@ -699,13 +709,11 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->shared_counter->counter;
+ attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
- mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
- entry->tuple.zone & MLX5_CT_ZONE_MASK,
- MLX5_CT_ZONE_MASK);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(zone_rule->rule)) {
@@ -732,13 +740,34 @@ err_attr:
return err;
}
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_ct_counter *counter;
+ int ret;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ counter->is_shared = false;
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ if (IS_ERR(counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(counter->counter);
+ kfree(counter);
+ return ERR_PTR(ret);
+ }
+
+ return counter;
+}
+
+static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
- struct mlx5_ct_shared_counter *shared_counter;
- struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
@@ -767,25 +796,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
tuples_ht_params);
if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->shared_counter;
+ return rev_entry->counter;
}
}
mutex_unlock(&ct_priv->shared_counter_lock);
- shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
- if (!shared_counter)
- return ERR_PTR(-ENOMEM);
-
- shared_counter->counter = mlx5_fc_create(dev, true);
- if (IS_ERR(shared_counter->counter)) {
- ct_dbg("Failed to create counter for ct entry");
- ret = PTR_ERR(shared_counter->counter);
- kfree(shared_counter);
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter)) {
+ ret = PTR_ERR(shared_counter);
return ERR_PTR(ret);
}
+ shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@@ -798,10 +822,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
- entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
- if (IS_ERR(entry->shared_counter)) {
- err = PTR_ERR(entry->shared_counter);
- ct_dbg("Failed to create counter for ct entry");
+ if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+ entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+ else
+ entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
return err;
}
@@ -820,7 +847,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@@ -890,13 +917,13 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_insert:
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_has_nat(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node, tuples_nat_ht_params);
err_tuple_nat:
- if (entry->tuple_node.next)
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
err_tuple:
err_set:
kfree(entry);
@@ -911,14 +938,14 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
mutex_lock(&ct_priv->shared_counter_lock);
- if (entry->tuple_node.next)
+ if (mlx5_tc_ct_entry_has_nat(entry))
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
mutex_unlock(&ct_priv->shared_counter_lock);
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
}
@@ -956,7 +983,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
@@ -1061,8 +1088,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ bool trk, est, untrk, unest, new, rpl, unrpl;
struct flow_dissector_key_ct *mask, *key;
- bool trk, est, untrk, unest, new;
u32 ctstate = 0, ctstate_mask = 0;
u16 ct_state_on, ct_state_off;
u16 ct_state, ct_state_mask;
@@ -1088,9 +1115,10 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
- TCA_FLOWER_KEY_CT_FLAGS_NEW)) {
+ TCA_FLOWER_KEY_CT_FLAGS_NEW |
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY)) {
NL_SET_ERR_MSG_MOD(extack,
- "only ct_state trk, est and new are supported for offload");
+ "only ct_state trk, est, new and rpl are supported for offload");
return -EOPNOTSUPP;
}
@@ -1099,13 +1127,17 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
if (new) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1220,9 +1252,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
pre_ct->flow_rule = rule;
/* add miss rule */
- memset(spec, 0, sizeof(*spec));
dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
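+ /* A NULL spec yields a rule with no match criteria, i.e. a catch-all,
+ * which is exactly what the miss rule needs.
+ */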
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 1f9526244222..3479672e84cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -81,8 +81,8 @@ static int parse_tunnel(struct mlx5e_priv *priv,
if (!enc_keyid.mask->keyid)
return 0;
- if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP))
+ if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
+ !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
flow_rule_match_mpls(rule, &match);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
new file mode 100644
index 000000000000..37fc1d77ded7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies */
+
+#include <net/page_pool.h>
+#include "en/txrx.h"
+#include "en/params.h"
+#include "en/trap.h"
+
+static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_trap *trap_ctx = container_of(napi, struct mlx5e_trap, napi);
+ struct mlx5e_ch_stats *ch_stats = trap_ctx->stats;
+ struct mlx5e_rq *rq = &trap_ctx->rq;
+ bool busy = false;
+ int work_done = 0;
+
+ ch_stats->poll++;
+
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
+ busy |= work_done == budget;
+ busy |= rq->post_wqes(rq);
+
+ if (busy)
+ return budget;
+
+ if (unlikely(!napi_complete_done(napi, work_done)))
+ return work_done;
+
+ mlx5e_cq_arm(&rq->cq);
+ return work_done;
+}
+
+static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
+ struct mlx5e_ch_stats *ch_stats,
+ struct mlx5e_rq *rq)
+{
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct page_pool_params pp_params = {};
+ int node = dev_to_node(mdev->device);
+ u32 pool_size;
+ int wq_sz;
+ int err;
+ int i;
+
+ rqp->wq.db_numa_node = node;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = mdev->device;
+ rq->netdev = priv->netdev;
+ rq->mdev = mdev;
+ rq->priv = priv;
+ rq->stats = stats;
+ rq->clock = &mdev->clock;
+ rq->tstamp = &priv->tstamp;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
+ pool_size = 1 << params->log_rq_mtu_frames;
+
+ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
+
+ rq->wqe.info = rqp->frags_info;
+ rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+ rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
+ (wq_sz << rq->wqe.info.log_num_frags)),
+ GFP_KERNEL, node);
+ if (!rq->wqe.frags) {
+ err = -ENOMEM;
+ goto err_wq_cyc_destroy;
+ }
+
+ err = mlx5e_init_di_list(rq, wq_sz, node);
+ if (err)
+ goto err_free_frags;
+
+ rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+
+ mlx5e_rq_set_trap_handlers(rq, params);
+
+ /* Create a page_pool and register it with rxq */
+ pp_params.order = 0;
+ pp_params.flags = 0; /* No internal DMA mapping in page_pool */
+ pp_params.pool_size = pool_size;
+ pp_params.nid = node;
+ pp_params.dev = mdev->device;
+ pp_params.dma_dir = rq->buff.map_dir;
+
+ /* page_pool can be used even when there is no rq->xdp_prog:
+ * since this page_pool does not handle DMA mapping, there is no
+ * required state to clear, and page_pool gracefully handles an
+ * elevated refcnt.
+ */
+ rq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->page_pool)) {
+ err = PTR_ERR(rq->page_pool);
+ rq->page_pool = NULL;
+ goto err_free_di_list;
+ }
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe_cyc *wqe =
+ mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
+ int f;
+
+ for (f = 0; f < rq->wqe.info.num_frags; f++) {
+ u32 frag_size = rq->wqe.info.arr[f].frag_size |
+ MLX5_HW_START_PADDING;
+
+ wqe->data[f].byte_count = cpu_to_be32(frag_size);
+ wqe->data[f].lkey = rq->mkey_be;
+ }
+ /* Pad with an empty terminator fragment if num_frags is not a power of two. */
+ if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
+ wqe->data[f].byte_count = 0;
+ wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ wqe->data[f].addr = 0;
+ }
+ }
+ return 0;
+
+err_free_di_list:
+ mlx5e_free_di_list(rq);
+err_free_frags:
+ kvfree(rq->wqe.frags);
+err_wq_cyc_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
+{
+ page_pool_destroy(rq->page_pool);
+ mlx5e_free_di_list(rq);
+ kvfree(rq->wqe.frags);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
+ struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param,
+ struct mlx5e_ch_stats *ch_stats,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_create_cq_param ccp = {};
+ struct dim_cq_moder trap_moder = {};
+ struct mlx5e_cq *cq = &rq->cq;
+ int err;
+
+ ccp.node = dev_to_node(mdev->device);
+ ccp.ch_stats = ch_stats;
+ ccp.napi = napi;
+ ccp.ix = 0;
+ err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5e_create_rq(rq, rq_param);
+ if (err)
+ goto err_free_rq;
+
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+err_free_rq:
+ mlx5e_free_trap_rq(rq);
+err_destroy_cq:
+ mlx5e_close_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
+{
+ mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+ mlx5e_free_trap_rq(rq);
+ mlx5e_close_cq(&rq->cq);
+}
+
+static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
+ u32 rqn)
+{
+ void *tirc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, rqn);
+ err = mlx5e_create_tir(mdev, tir, in);
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
+{
+ mlx5e_destroy_tir(mdev, tir);
+}
+
+static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
+{
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+}
+
+static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+}
+
+static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
+{
+ struct mlx5e_params *params = &t->params;
+
+ params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
+ mlx5e_init_rq_type_params(priv->mdev, params);
+ params->sw_mtu = priv->netdev->max_mtu;
+ mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
+}
+
+static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
+{
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_trap *t;
+ int err;
+
+ t = kvzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu));
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5e_build_trap_params(priv, t);
+
+ t->priv = priv;
+ t->mdev = priv->mdev;
+ t->tstamp = &priv->tstamp;
+ t->pdev = mlx5_core_dma_dev(priv->mdev);
+ t->netdev = priv->netdev;
+ t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ t->stats = &priv->trap_stats.ch;
+
+ netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
+
+ err = mlx5e_open_trap_rq(priv, &t->napi,
+ &priv->trap_stats.rq,
+ &t->params, &t->rq_param,
+ &priv->trap_stats.ch,
+ &t->rq);
+ if (unlikely(err))
+ goto err_napi_del;
+
+ err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn);
+ if (err)
+ goto err_close_trap_rq;
+
+ return t;
+
+err_close_trap_rq:
+ mlx5e_close_trap_rq(&t->rq);
+err_napi_del:
+ netif_napi_del(&t->napi);
+ kvfree(t);
+ return ERR_PTR(err);
+}
+
+void mlx5e_close_trap(struct mlx5e_trap *trap)
+{
+ mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
+ mlx5e_close_trap_rq(&trap->rq);
+ netif_napi_del(&trap->napi);
+ kvfree(trap);
+}
+
+static void mlx5e_activate_trap(struct mlx5e_trap *trap)
+{
+ napi_enable(&trap->napi);
+ mlx5e_activate_trap_rq(&trap->rq);
+ napi_schedule(&trap->napi);
+}
+
+void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
+{
+ struct mlx5e_trap *trap = priv->en_trap;
+
+ mlx5e_deactivate_trap_rq(&trap->rq);
+ napi_disable(&trap->napi);
+}
+
+static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
+{
+ struct mlx5e_trap *trap;
+
+ trap = mlx5e_open_trap(priv);
+ if (IS_ERR(trap))
+ goto out;
+
+ mlx5e_activate_trap(trap);
+out:
+ return trap;
+}
+
+static void mlx5e_del_trap_queue(struct mlx5e_priv *priv)
+{
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+}
+
+static int mlx5e_trap_get_tirn(struct mlx5e_trap *en_trap)
+{
+ return en_trap->tir.tirn;
+}
+
+static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
+{
+ bool open_queue = !priv->en_trap;
+ struct mlx5e_trap *trap;
+ int err;
+
+ if (open_queue) {
+ trap = mlx5e_add_trap_queue(priv);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+ priv->en_trap = trap;
+ }
+
+ switch (trap_id) {
+ case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
+ err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ if (err)
+ goto err_out;
+ break;
+ case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
+ err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ if (err)
+ goto err_out;
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
+ err = -EINVAL;
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ if (open_queue)
+ mlx5e_del_trap_queue(priv);
+ return err;
+}
+
+static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
+{
+ switch (trap_id) {
+ case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
+ mlx5e_remove_vlan_trap(priv);
+ break;
+ case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
+ mlx5e_remove_mac_trap(priv);
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
+ return -EINVAL;
+ }
+ if (priv->en_trap && !mlx5_devlink_trap_get_num_active(priv->mdev))
+ mlx5e_del_trap_queue(priv);
+
+ return 0;
+}
+
+int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx)
+{
+ int err = 0;
+
+ /* Traps are unarmed when the interface is down, so there is no need
+ * to update them here. The configuration is saved in the core driver;
+ * it is queried and applied when the interface is brought up in
+ * mlx5e_open_locked().
+ */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ switch (trap_ctx->action) {
+ case DEVLINK_TRAP_ACTION_TRAP:
+ err = mlx5e_handle_action_trap(priv, trap_ctx->id);
+ break;
+ case DEVLINK_TRAP_ACTION_DROP:
+ err = mlx5e_handle_action_drop(priv, trap_ctx->id);
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unsupported action %d\n", __func__,
+ trap_ctx->action);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int mlx5e_apply_trap(struct mlx5e_priv *priv, int trap_id, bool enable)
+{
+ enum devlink_trap_action action;
+ int err;
+
+ err = mlx5_devlink_traps_get_action(priv->mdev, trap_id, &action);
+ if (err)
+ return err;
+ if (action == DEVLINK_TRAP_ACTION_TRAP)
+ err = enable ? mlx5e_handle_action_trap(priv, trap_id) :
+ mlx5e_handle_action_drop(priv, trap_id);
+ return err;
+}
+
+static const int mlx5e_traps_arr[] = {
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
+};
+
+int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5e_traps_arr); i++) {
+ err = mlx5e_apply_trap(priv, mlx5e_traps_arr[i], enable);
+ if (err)
+ return err;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
new file mode 100644
index 000000000000..aa3f17658c6d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies */
+
+#ifndef __MLX5E_TRAP_H__
+#define __MLX5E_TRAP_H__
+
+#include "../en.h"
+#include "../devlink.h"
+
+struct mlx5e_trap {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_tir tir;
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+
+ /* data path - accessed per napi poll */
+ struct mlx5e_ch_stats *stats;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
+ struct hwtstamp_config *tstamp;
+ DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
+
+ struct mlx5e_params params;
+ struct mlx5e_rq_param rq_param;
+};
+
+void mlx5e_close_trap(struct mlx5e_trap *trap);
+void mlx5e_deactivate_trap(struct mlx5e_priv *priv);
+int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx);
+int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7943eb30b837..4880f2179273 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+ /* SWP offsets are in 2-byte words */
+ eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
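Since SWP offsets count 2-byte words, an inline-inserted 4-byte VLAN tag shifts every offset by VLAN_HLEN / 2 = 2. A concrete example:

	/* Untagged frame: outer L3 starts at byte 14 -> swp_outer_l3_offset = 7.
	 * After the 4-byte tag is inserted: L3 starts at byte 18 -> 7 + 2 = 9.
	 */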
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 899b98aca0d3..6488098d2700 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+ if (skb_vlan_tag_present(skb) && ihs)
+ mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@@ -142,9 +144,9 @@ static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *sta
{
#ifdef CONFIG_MLX5_EN_IPSEC
return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#endif
-
+#else
return false;
+#endif
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
+ mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index a9b45606dbdb..a97e8d205094 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -497,20 +497,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
}
}
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features)
-{
- struct sec_path *sp = skb_sec_path(skb);
- struct xfrm_state *x;
-
- if (sp && sp->len) {
- x = sp->xvec[0];
- if (x && x->xso.offload_handle)
- return true;
- }
- return false;
-}
-
void mlx5e_ipsec_build_inverse_table(void)
{
u16 mss_inv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 9df9b9a8e09b..3e80742a3caf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -57,8 +57,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_inverse_table_init(void);
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
@@ -87,8 +85,28 @@ static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ips
return ipsec_st->x;
}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+}
+
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct sec_path *sp = skb_sec_path(skb);
+
+ if (sp && sp->len) {
+ struct xfrm_state *x = sp->xvec[0];
+
+ if (x && x->xso.offload_handle)
+ return true;
+ }
+ return false;
+}
+
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
@@ -96,7 +114,14 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features) { return false; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 6c5c54bcd9be..5cb936541b9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- return NUM_IPSEC_SW_COUNTERS;
+ return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+ return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index d20243d6a032..f23c67575073 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_channels new_channels = {};
bool reset_channels = true;
+ bool opened;
int err = 0;
mutex_lock(&priv->state_lock);
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
trust_state);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (!opened)
reset_channels = false;
- }
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
reset_channels = false;
- if (reset_channels)
+ if (reset_channels) {
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_update_trust_state_hw,
&trust_state);
- else
+ } else {
err = mlx5e_update_trust_state_hw(priv, &trust_state);
+ if (!err && !opened)
+ priv->channels.params = new_channels.params;
+ }
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d9076d543104..5e9474dba4e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -447,12 +447,29 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- new_channels.params = priv->channels.params;
+ /* Don't allow changing the number of channels if HTB offload is active,
+ * because the numbering of the QoS SQs would change while per-queue
+ * qdiscs are attached.
+ */
+ if (priv->htb.maj_id) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
+ __func__);
+ goto out;
+ }
+
+ new_channels.params = *cur_params;
new_channels.params.num_channels = count;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
+ if (err)
+ *cur_params = old_params;
+
goto out;
}
@@ -1010,6 +1027,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+ const unsigned long link_modes, u8 autoneg)
+{
+ /* Extended link-mode has no speed limitations. */
+ if (ext)
+ return 0;
+
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -1103,13 +1136,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
- if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
- autoneg != AUTONEG_ENABLE) {
- netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
- __func__);
- err = -EINVAL;
+ err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+ if (err)
goto out;
- }
link_modes = link_modes & eproto.cap;
if (!link_modes) {
@@ -1954,6 +1983,16 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
return -EOPNOTSUPP;
+ /* Don't allow changing the PTP state if HTB offload is active, because
+ * the numbering of the QoS SQs would change while per-queue qdiscs are
+ * attached.
+ */
+ if (priv->htb.maj_id) {
+ netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
+ __func__);
+ return -EINVAL;
+ }
+
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index fa8149f6eb08..16ce7756ac43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -46,7 +46,6 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
enum {
MLX5E_FULLMATCH = 0,
MLX5E_ALLMULTI = 1,
- MLX5E_PROMISC = 2,
};
enum {
@@ -306,6 +305,79 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
+static struct mlx5_flow_handle *
+mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
+{
+ struct mlx5_flow_destination dest = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+ spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
+ spec->flow_context.flow_tag = trap_id;
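+ /* The flow tag is delivered back in the CQE, which lets the trap RX
+ * path map a received packet to its devlink trap id.
+ */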
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tir_num;
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+ return rule;
+}
+
+int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+{
+ struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ priv->fs.vlan.trap_rule = NULL;
+ netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
+ return err;
+ }
+ priv->fs.vlan.trap_rule = rule;
+ return 0;
+}
+
+void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
+{
+ if (priv->fs.vlan.trap_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
+ priv->fs.vlan.trap_rule = NULL;
+ }
+}
+
+int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+{
+ struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ priv->fs.l2.trap_rule = NULL;
+ netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
+ return err;
+ }
+ priv->fs.l2.trap_rule = rule;
+ return 0;
+}
+
+void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
+{
+ if (priv->fs.l2.trap_rule) {
+ mlx5_del_flow_rules(priv->fs.l2.trap_rule);
+ priv->fs.l2.trap_rule = NULL;
+ }
+}
+
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
if (!priv->fs.vlan.cvlan_filter_disabled)
@@ -419,6 +491,8 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ mlx5e_remove_vlan_trap(priv);
+
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
@@ -596,6 +670,83 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_apply_netdev_addr(priv);
}
+#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
+#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
+
+static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_handle **rule_p;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.ttc.ft.t;
+
+ rule_p = &priv->fs.promisc.rule;
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(*rule_p)) {
+ err = PTR_ERR(*rule_p);
+ *rule_p = NULL;
+ netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ }
+ kvfree(spec);
+ return err;
+}
+
+static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int err;
+
+ ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ return err;
+ }
+
+ err = mlx5e_add_promisc_rule(priv);
+ if (err)
+ goto err_destroy_promisc_table;
+
+ return 0;
+
+err_destroy_promisc_table:
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+
+ return err;
+}
+
+static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+{
+ if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existent promiscuous rule"))
+ return;
+ mlx5_del_flow_rules(priv->fs.promisc.rule);
+ priv->fs.promisc.rule = NULL;
+}
+
+static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+{
+ if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existent promiscuous table"))
+ return;
+ mlx5e_del_promisc_rule(priv);
+ mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
+ priv->fs.promisc.ft.t = NULL;
+}
+
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
@@ -615,14 +766,15 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+ int err;
if (enable_promisc) {
- if (!priv->channels.params.vlan_strip_disable)
+ err = mlx5e_create_promisc_table(priv);
+ if (err)
+ enable_promisc = false;
+ if (!priv->channels.params.vlan_strip_disable && !err)
netdev_warn_once(ndev,
"S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
- mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->fs.vlan.cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
@@ -635,11 +787,8 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
- if (disable_promisc) {
- if (!priv->fs.vlan.cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
- mlx5e_del_l2_flow_rule(priv, &ea->promisc);
- }
+ if (disable_promisc)
+ mlx5e_destroy_promisc_table(priv);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
@@ -942,6 +1091,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1087,6 +1237,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1304,9 +1455,6 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
-
- case MLX5E_PROMISC:
- break;
}
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -1323,12 +1471,12 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
}
#define MLX5E_NUM_L2_GROUPS 3
-#define MLX5E_L2_GROUP1_SIZE BIT(0)
-#define MLX5E_L2_GROUP2_SIZE BIT(15)
-#define MLX5E_L2_GROUP3_SIZE BIT(0)
+#define MLX5E_L2_GROUP1_SIZE BIT(15)
+#define MLX5E_L2_GROUP2_SIZE BIT(0)
+#define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
MLX5E_L2_GROUP2_SIZE +\
- MLX5E_L2_GROUP3_SIZE)
+ MLX5E_L2_GROUP_TRAP_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1351,7 +1499,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers.dmac_47_16);
- /* Flow Group for promiscuous */
+ /* Flow Group for full match */
+ eth_broadcast_addr(mc_dmac);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1360,9 +1510,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
- /* Flow Group for full match */
- eth_broadcast_addr(mc_dmac);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ /* Flow Group for allmulti */
+ eth_zero_addr(mc_dmac);
+ mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1371,11 +1521,10 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
- /* Flow Group for allmulti */
- eth_zero_addr(mc_dmac);
- mc_dmac[0] = 0x01;
+ /* Flow Group for l2 traps */
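+ /* (zeroed match criteria: FTEs in this group match any packet) */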
+ memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_L2_GROUP3_SIZE;
+ ix += MLX5E_L2_GROUP_TRAP_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
@@ -1390,6 +1539,7 @@ err_destroy_groups:
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
+ kfree(ft->g);
return err;
}
@@ -1432,15 +1582,17 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 4
+#define MLX5E_NUM_VLAN_GROUPS 5
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
+#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE +\
MLX5E_VLAN_GROUP2_SIZE +\
- MLX5E_VLAN_GROUP3_SIZE)
+ MLX5E_VLAN_GROUP3_SIZE +\
+ MLX5E_VLAN_GROUP_TRAP_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
@@ -1495,6 +1647,15 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
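+
+ /* Trap group: zeroed match criteria (matches any packet); must be last. */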
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7a79d330c075..aad3887e3c1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -65,6 +65,8 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "qos.h"
+#include "en/trap.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -211,6 +213,33 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
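+
+/* Handler for the blocking (process-context) notifier chain: unlike the
+ * atomic async event path above, handlers on this chain may sleep, which
+ * trap setup requires.
+ */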
+static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
+ int err;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_TYPE_TRAP:
+ err = mlx5e_handle_trap_event(priv, data);
+ break;
+ default:
+ netdev_warn(priv->netdev, "Sync event: Unknouwn event %ld\n", event);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
+{
+ priv->blocking_events_nb.notifier_call = blocking_event;
+ mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
+}
+
+static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
+{
+ mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
+}
+
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
@@ -342,13 +371,11 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
-static int mlx5e_init_di_list(struct mlx5e_rq *rq,
- int wq_sz, int cpu)
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
- rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
- GFP_KERNEL, cpu_to_node(cpu));
+ rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
if (!rq->wqe.di)
return -ENOMEM;
@@ -357,7 +384,7 @@ static int mlx5e_init_di_list(struct mlx5e_rq *rq,
return 0;
}
-static void mlx5e_free_di_list(struct mlx5e_rq *rq)
+void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
kvfree(rq->wqe.di);
}
@@ -499,7 +526,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
+ err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
if (err)
goto err_rq_frags;
@@ -650,8 +677,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-static int mlx5e_create_rq(struct mlx5e_rq *rq,
- struct mlx5e_rq_param *param)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -774,7 +800,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
return err;
}
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
@@ -1143,7 +1169,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -1233,6 +1258,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p)
{
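+ /* Collect the MODIFY_SQ bitmask fields: bit 0 selects the packet
+  * pacing rate limit index, bit 2 the QoS queue group id; both are
+  * applied only when moving the SQ to the ready state.
+  */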
+ u64 bitmask = 0;
void *in;
void *sqc;
int inlen;
@@ -1248,9 +1274,14 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
MLX5_SET(sqc, sqc, state, p->next_state);
if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
- MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
- MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
+ bitmask |= 1;
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
+ }
+ if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
+ bitmask |= 1 << 2;
+ MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
}
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
err = mlx5_core_modify_sq(mdev, sqn, in);
@@ -1267,6 +1298,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
+ u16 qos_queue_group_id,
u32 *sqn)
{
struct mlx5e_modify_sq_param msp = {0};
@@ -1278,6 +1310,10 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
msp.curr_state = MLX5_SQC_STATE_RST;
msp.next_state = MLX5_SQC_STATE_RDY;
+ if (qos_queue_group_id) {
+ msp.qos_update = true;
+ msp.qos_queue_group_id = qos_queue_group_id;
+ }
err = mlx5e_modify_sq(mdev, *sqn, &msp);
if (err)
mlx5e_destroy_sq(mdev, *sqn);
@@ -1288,13 +1324,9 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
static int mlx5e_set_sq_maxrate(struct net_device *dev,
struct mlx5e_txqsq *sq, u32 rate);
-static int mlx5e_open_txqsq(struct mlx5e_channel *c,
- u32 tisn,
- int txq_ix,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq,
- int tc)
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+ struct mlx5e_params *params, struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1304,12 +1336,17 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
if (err)
return err;
+ if (qos_queue_group_id)
+ sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
+ else
+ sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+
csp.tisn = tisn;
csp.tis_lst_sz = 1;
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
@@ -1366,7 +1403,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
}
}
-static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
@@ -1403,7 +1440,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1452,7 +1489,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
@@ -1703,7 +1740,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
int txq_ix = c->ix + tc * params->num_channels;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc);
+ params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
if (err)
goto err_close_sqs;
}
@@ -2044,6 +2081,8 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
+
+ mlx5e_qos_deactivate_queues(c);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2051,6 +2090,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
+ mlx5e_qos_close_queues(c);
netif_napi_del(&c->napi);
kvfree(c);
@@ -2068,10 +2108,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
u32 buf_size = 0;
int i;
-#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN;
-#endif
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -2200,9 +2238,8 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2381,10 +2418,18 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+
mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
+err_close_ptp:
+ if (chs->port_ptp)
+ mlx5e_port_ptp_close(chs->port_ptp);
+
err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
@@ -2917,11 +2962,31 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
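+
+/* Recompute the number of real TX queues: one per channel per TC, plus
+ * the currently active HTB (QoS) leaf queues, plus one extra set of TCs
+ * when the PTP port channel is enabled.
+ */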
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
+{
+ int qos_queues, nch, ntc, num_txqs, err;
+
+ qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
+ num_txqs = nch * ntc + qos_queues;
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
+ num_txqs += ntc;
+
+ mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
+ err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
+ if (err)
+ netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+
+ return err;
+}
+
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
- int num_txqs, num_rxqs, nch, ntc;
int old_num_txqs, old_ntc;
+ int num_rxqs, nch, ntc;
int err;
old_num_txqs = netdev->real_num_tx_queues;
@@ -2929,18 +2994,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
- num_txqs = nch * ntc;
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
- num_txqs += ntc;
num_rxqs = nch * priv->profile->rq_groups;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
- err = netif_set_real_num_tx_queues(netdev, num_txqs);
- if (err) {
- netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+ err = mlx5e_update_tx_netdev_queues(priv);
+ if (err)
goto err_tcs;
- }
err = netif_set_real_num_rx_queues(netdev, num_rxqs);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
@@ -3044,6 +3104,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
netif_tx_start_all_queues(priv->netdev);
@@ -3161,7 +3222,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
- if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)
@@ -3185,6 +3247,7 @@ int mlx5e_open_locked(struct net_device *netdev)
priv->profile->update_rx(priv);
mlx5e_activate_priv_channels(priv);
+ mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
@@ -3220,6 +3283,7 @@ int mlx5e_close_locked(struct net_device *netdev)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ mlx5e_apply_traps(priv, false);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
@@ -3609,11 +3673,26 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ /* MQPRIO is another top-level qdisc that can't be attached
+ * simultaneously with the offloaded HTB.
+ */
+ if (WARN_ON(priv->htb.maj_id)) {
+ err = -EINVAL;
+ goto out;
+ }
+
new_channels.params = priv->channels.params;
new_channels.params.num_tc = tc ? tc : 1;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = priv->channels.params;
priv->channels.params = new_channels.params;
+ err = mlx5e_num_channels_changed(priv);
+ if (err)
+ priv->channels.params = old_params;
+
goto out;
}
@@ -3629,12 +3708,55 @@ out:
return err;
}
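+
+/* Dispatch HTB qdisc offload commands to the mlx5e HTB implementation.
+ * The queue-returning commands (LEAF_ALLOC_QUEUE, LEAF_QUERY_QUEUE)
+ * report the selected txq back through htb->qid.
+ */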
+static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
+{
+ int res;
+
+ switch (htb->command) {
+ case TC_HTB_CREATE:
+ return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
+ htb->extack);
+ case TC_HTB_DESTROY:
+ return mlx5e_htb_root_del(priv);
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
+ htb->rate, htb->ceil, htb->extack);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
+ htb->rate, htb->ceil, htb->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
+ htb->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(priv, htb->classid,
+ htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
+ htb->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_get_txq_by_classid(priv, htb->classid);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
switch (type) {
case TC_SETUP_BLOCK: {
@@ -3648,6 +3770,11 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
+ case TC_SETUP_QDISC_HTB:
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_setup_tc_htb(priv, type_data);
+ mutex_unlock(&priv->state_lock);
+ return err;
default:
return -EOPNOTSUPP;
}
@@ -3756,7 +3883,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- struct mlx5e_params *old_params;
+ struct mlx5e_params *cur_params;
int err = 0;
bool reset;
@@ -3769,8 +3896,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
- old_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ cur_params = &priv->channels.params;
+ if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
@@ -3778,18 +3905,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = *old_params;
+ new_channels.params = *cur_params;
new_channels.params.lro_en = enable;
- if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+ if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}
if (!reset) {
- *old_params = new_channels.params;
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
+ *cur_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ *cur_params = old_params;
goto out;
}
@@ -3812,20 +3944,25 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
+#endif
+
+ if (!enable && priv->htb.maj_id) {
+ netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
return 0;
}
-#endif
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@@ -3923,9 +4060,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
-#endif
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -4066,9 +4201,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}
if (!reset) {
+ unsigned int old_mtu = params->sw_mtu;
+
params->sw_mtu = new_mtu;
- if (preactivate)
- preactivate(priv, NULL);
+ if (preactivate) {
+ err = preactivate(priv, NULL);
+ if (err) {
+ params->sw_mtu = old_mtu;
+ goto out;
+ }
+ }
netdev->mtu = params->sw_mtu;
goto out;
}
@@ -4375,10 +4517,8 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
-#ifdef CONFIG_MLX5_EN_IPSEC
if (mlx5e_ipsec_feature_check(skb, netdev, features))
return features;
-#endif
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -4621,8 +4761,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
@@ -5026,13 +5164,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
+ if (mlx5_qos_is_supported(mdev))
+ netdev->features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
@@ -5250,6 +5390,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
+ mlx5e_enable_blocking_events(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
@@ -5287,6 +5428,12 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_disable_blocking_events(priv);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -5338,6 +5485,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
return -ENOMEM;
mutex_init(&priv->state_lock);
+ hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5360,8 +5508,14 @@ err_free_cpumask:
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
+ int i;
+
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);
+
+ for (i = 0; i < priv->htb.max_qos_sqs; i++)
+ kfree(priv->htb.qos_sq_stats[i]);
+ kvfree(priv->htb.qos_sq_stats);
}
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
@@ -5371,13 +5525,17 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
{
struct net_device *netdev;
unsigned int ptp_txqs = 0;
+ int qos_sqs = 0;
int err;
if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
ptp_txqs = profile->max_tc;
+ if (mlx5_qos_is_supported(mdev))
+ qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
+
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * profile->max_tc + ptp_txqs,
+ nch * profile->max_tc + ptp_txqs + qos_sqs,
nch * profile->rq_groups);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 989c70c1eda3..629ae6ccb4cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -653,8 +653,6 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_rate = mlx5e_set_vf_rate,
@@ -737,7 +735,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_NETNS_LOCAL;
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
+#endif
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7f5851c61218..98b56f495b32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,6 +52,7 @@
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
+#include "devlink.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -1126,12 +1127,8 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
- xdp->data_hard_start = va;
- xdp->data = va + headroom;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &rq->xdp_rxq;
- xdp->frame_sz = rq->buff.frame0_sz;
+ xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, va, headroom, len, false);
}
static struct sk_buff *
@@ -1786,12 +1783,10 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev)) {
netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
return -EINVAL;
}
-#endif
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL;
@@ -1821,3 +1816,48 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
return 0;
}
+
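+/* Completion handler for the trap RQ: the trap id travels in the CQE
+ * flow tag; the packet is rebuilt into an skb, reported to devlink for
+ * inspection and then freed. Trapped traffic is never delivered to the
+ * stack from here.
+ */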
+static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_priv *priv = netdev_priv(rq->netdev);
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ struct mlx5e_wqe_frag_info *wi;
+ struct sk_buff *skb;
+ u32 cqe_bcnt;
+ u16 trap_id;
+ u16 ci;
+
+ trap_id = get_cqe_flow_tag(cqe);
+ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ wi = get_frag(rq, ci);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
+ if (!skb)
+ goto free_wqe;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ skb_push(skb, ETH_HLEN);
+
+ mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port);
+ dev_kfree_skb_any(skb);
+
+free_wqe:
+ mlx5e_free_rx_wqe(rq, wi, false);
+ mlx5_wq_cyc_pop(wq);
+}
+
+void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
+{
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_skb_from_cqe_linear :
+ mlx5e_skb_from_cqe_nonlinear;
+ rq->post_wqes = mlx5e_post_rx_wqes;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+ rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 2cf2042b37c7..92c5b81427b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -420,6 +420,25 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
}
}
+static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
+ struct mlx5e_sw_stats *s)
+{
+ struct mlx5e_sq_stats **stats;
+ u16 max_qos_sqs;
+ int i;
+
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ stats = READ_ONCE(priv->htb.qos_sq_stats);
+
+ for (i = 0; i < max_qos_sqs; i++) {
+ mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
+ }
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -449,6 +468,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
}
}
mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
+ mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
static const struct counter_desc q_stats_desc[] = {
@@ -1740,6 +1760,41 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
+static const struct counter_desc qos_sq_stats_desc[] = {
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+#endif
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
+};
+
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
@@ -1750,6 +1805,49 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
+#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
+{
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
+{
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ int i, qid;
+
+ for (qid = 0; qid < max_qos_sqs; qid++)
+ for (i = 0; i < NUM_QOS_SQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ qos_sq_stats_desc[i].format, qid);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
+{
+ struct mlx5e_sq_stats **stats;
+ u16 max_qos_sqs;
+ int i, qid;
+
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ stats = READ_ONCE(priv->htb.qos_sq_stats);
+
+ for (qid = 0; qid < max_qos_sqs; qid++) {
+ struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
+
+ for (i = 0; i < NUM_QOS_SQ_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
+ }
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
@@ -1932,6 +2030,7 @@ MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
+static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
@@ -1955,6 +2054,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
+ &MLX5E_STATS_GRP(qos),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index e41fc11f2ce7..93c41312fb03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -55,6 +55,8 @@
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
+
struct counter_desc {
char format[ETH_GSTRING_LEN];
size_t offset; /* Byte offset */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4cdf834fa74a..8fd38ad8113b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -67,6 +67,7 @@
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1162,6 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
@@ -1192,6 +1196,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow_flag_clear(flow, OFFLOADED);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ goto offload_rule_0;
+
if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
@@ -1200,6 +1207,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
@@ -1317,12 +1325,6 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0;
int out_index;
- if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
- NL_SET_ERR_MSG_MOD(extack,
- "E-switch priorities unsupported, upgrade FW");
- return -EOPNOTSUPP;
- }
-
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
@@ -2269,8 +2271,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
- netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
- dissector->used_keys);
+ netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -5007,13 +5009,13 @@ errout:
return err;
}
-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
+ u32 rate_mbps = 0;
u16 vport_num;
- u32 rate_mbps;
int err;
vport_num = rpriv->rep->vport;
@@ -5030,7 +5032,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* Moreover, if rate is non zero we choose to configure to a minimum of
* 1 mbit/sec.
*/
- rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
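+ /* do_div() divides the u64 dividend in place and returns the
+  * remainder, so divide first and then clamp the quotient.
+  */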
+ if (rate) {
+ rate = (rate * BITS_PER_BYTE) + 500000;
+ do_div(rate, 1000000);
+ rate_mbps = max_t(u32, rate, 1);
+ }
+
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e47e2a0059d0..da6a358a8a10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -106,28 +106,53 @@ return_txq:
return priv->port_ptp_tc2realtxq[up];
}
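+
+/* Map an skb to an HTB-offload txq: skb->priority carries the HTB classid
+ * (major = qdisc handle, minor = class). Fall back to the configured
+ * default class when the priority does not target the offloaded HTB.
+ */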
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+ u16 htb_maj_id)
+{
+ u16 classid;
+
+ if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = READ_ONCE(priv->htb.defcls);
+
+ if (!classid)
+ return 0;
+
+ return mlx5e_get_txq_by_classid(priv, classid);
+}
+
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int num_tc_x_num_ch;
int txq_ix;
int up = 0;
int ch_ix;
- if (unlikely(priv->channels.port_ptp)) {
- int num_tc_x_num_ch;
+ /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
+ num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+ if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
+ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+ u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- mlx5e_use_ptpsq(skb))
- return mlx5e_select_ptpsq(dev, skb);
+ if (unlikely(htb_maj_id)) {
+ txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
+ if (txq_ix > 0)
+ return txq_ix;
+ }
- /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
- num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+ if (unlikely(priv->channels.port_ptp))
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
- /* Fix netdev_pick_tx() not to choose ptp_channel txqs.
+ /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
* If they are selected, switch to regular queues.
- * Driver to select these queues only at mlx5e_select_ptpsq().
+ * The driver selects these queues only in mlx5e_select_ptpsq()
+ * and mlx5e_select_htb_queue().
*/
if (unlikely(txq_ix >= num_tc_x_num_ch))
txq_ix %= num_tc_x_num_ch;
@@ -241,9 +266,8 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
-
} else
sq->stats->csum_none++;
}
@@ -682,9 +706,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -703,6 +727,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
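+ /* An HTB txq may be picked while its SQ is being torn down and is
+  * no longer mapped; drop the packet rather than dereference a NULL SQ.
+  */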
+ if (unlikely(!sq)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
/* May send SKBs and WQEs. */
if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
@@ -714,7 +742,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+ attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +760,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a3cfe06d5116..d54da3797c30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -115,17 +115,21 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
struct mlx5e_xdpsq *xsksq = &c->xsksq;
+ struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_rq *xskrq = &c->xskrq;
struct mlx5e_rq *rq = &c->rq;
bool aff_change = false;
bool busy_xsk = false;
bool busy = false;
int work_done = 0;
+ u16 qos_sqs_size;
bool xsk_open;
int i;
rcu_read_lock();
+ qos_sqs = rcu_dereference(c->qos_sqs);
+
xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
ch_stats->poll++;
@@ -133,6 +137,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ if (unlikely(qos_sqs)) {
+ smp_rmb(); /* Pairs with mlx5e_qos_alloc_queues. */
+ qos_sqs_size = READ_ONCE(c->qos_sqs_size);
+
+ for (i = 0; i < qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
+
+ if (sq)
+ busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
+ }
+ }
+
busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
if (c->xdp)
@@ -186,6 +202,16 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
}
+ if (unlikely(qos_sqs)) {
+ for (i = 0; i < qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
+
+ if (sq) {
+ mlx5e_handle_tx_dim(sq);
+ mlx5e_cq_arm(&sq->cq);
+ }
+ }
+ }
mlx5e_handle_rx_dim(rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index fc0afa03d407..174dfbc996c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -467,7 +467,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
- eq_table->irq_table = dev->priv.irq_table;
+ eq_table->irq_table = mlx5_irq_table_get(dev);
return 0;
}
@@ -595,6 +595,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
+ if (MLX5_CAP_GEN_MAX(dev, vhca_state))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 2b85d4777303..3e19b1721303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- table_size);
- if (IS_ERR(vport->egress.acl)) {
- err = PTR_ERR(vport->egress.acl);
- vport->egress.acl = NULL;
- goto out;
+ if (!vport->egress.acl) {
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
}
- err = esw_acl_egress_lgcy_groups_create(esw, vport);
- if (err)
- goto out;
-
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 4c74e2690d57..26b37a0f8762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -150,7 +150,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
{
- return mlx5_eswitch_is_vf_vport(esw, vport_num);
+ return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index ffff11baa3d0..cb1e181f4c6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -122,3 +122,44 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
vport = mlx5_eswitch_get_vport(esw, vport_num);
return vport->dl_port;
}
+
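+/* Register a devlink port for an SF vport. The port carries PCI-SF
+ * attributes (parent PF number and SF number) plus the switch id shared
+ * with the rest of the eswitch ports.
+ */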
+int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct netdev_phys_item_id ppid = {};
+ unsigned int dl_port_index;
+ struct mlx5_vport *vport;
+ struct devlink *devlink;
+ u16 pfnum;
+ int err;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ pfnum = PCI_FUNC(dev->pdev->devfn);
+ mlx5_esw_get_port_parent_id(dev, &ppid);
+ memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum);
+ devlink = priv_to_devlink(dev);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
+ err = devlink_port_register(devlink, dl_port, dl_port_index);
+ if (err)
+ return err;
+
+ vport->dl_port = dl_port;
+ return 0;
+}
+
+void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+ devlink_port_unregister(vport->dl_port);
+ vport->dl_port = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index da901e364656..820305b1664e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1042,8 +1042,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
void *vport_elem;
int err = 0;
- if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
- !MLX5_CAP_QOS(dev, esw_scheduling))
+ if (!esw->qos.enabled)
return 0;
if (vport->qos.enabled)
@@ -1273,8 +1272,8 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
esw_vport_cleanup_acl(esw, vport);
}
-static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
- enum mlx5_eswitch_vport_event enabled_events)
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
int ret;
@@ -1310,7 +1309,7 @@ done:
return ret;
}
-static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport;
@@ -1366,9 +1365,15 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+ u16 max_sf_vports;
u32 *out;
int err;
+ max_sf_vports = mlx5_sf_max_functions(dev);
+ /* The device reports SF state as a bitmap packed into 64-bit words */
+ if (max_sf_vports)
+ outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64);
+
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return ERR_PTR(-ENOMEM);
@@ -1376,7 +1381,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
- err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
if (!err)
return out;
@@ -1426,7 +1431,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
{
int err;
- err = esw_enable_vport(esw, vport_num, enabled_events);
+ err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
if (err)
return err;
@@ -1437,14 +1442,14 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
err_rep:
- esw_disable_vport(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
return err;
}
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
- esw_disable_vport(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
}
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
@@ -1594,6 +1599,15 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
kvfree(out);
}
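+
+/* Tell registered eswitch event listeners that the mode is changing;
+ * called with the new mode on enable and with MLX5_ESWITCH_NONE before
+ * the eswitch is disabled.
+ */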
+static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
+{
+ struct mlx5_esw_event_info info = {};
+
+ info.new_mode = mode;
+
+ blocking_notifier_call_chain(&esw->n_head, 0, &info);
+}
+
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
@@ -1654,6 +1668,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
+ mlx5_esw_mode_change_notify(esw, mode);
+
return 0;
abort:
@@ -1710,6 +1726,11 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
+ /* Notify eswitch users that it is exiting from the current mode,
+ * so that they can do any necessary cleanup before the eswitch is
+ * disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+
mlx5_eswitch_event_handlers_unregister(esw);
if (esw->mode == MLX5_ESWITCH_LEGACY)
@@ -1810,6 +1831,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
dev->priv.eswitch = esw;
+ BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
return 0;
abort:
if (esw->work_queue)
@@ -1899,7 +1921,8 @@ static bool
is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num);
+ mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
}
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
@@ -2500,4 +2523,12 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
+int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&esw->n_head, nb);
+}
+void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&esw->n_head, nb);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index cf87de94418f..479d2ac2cd85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -43,6 +43,7 @@
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
+#include "sf/sf.h"
#include "en/tc_ct.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -277,6 +278,7 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
+ struct blocking_notifier_head n_head;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -499,6 +501,40 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
+static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
+{
+ /* PF and VF vport indices run from 0 to max_vfs; SF indices follow */
+ return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
+}
+
+static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
+}
+
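+/* SF vport numbers form a contiguous hardware range that starts at
+ * mlx5_sf_start_function_id(); the helpers below translate between that
+ * range and the vport table indices that follow the PF and VF entries.
+ */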
+static inline int
+mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num - mlx5_sf_start_function_id(esw->dev) +
+ MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
+}
+
+static inline u16
+mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
+{
+ return mlx5_sf_start_function_id(esw->dev) + idx -
+ (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
+}
+
+static inline bool
+mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_sf_supported(esw->dev) &&
+ vport_num >= mlx5_sf_start_function_id(esw->dev) &&
+ (vport_num < (mlx5_sf_start_function_id(esw->dev) +
+ mlx5_sf_max_functions(esw->dev)));
+}
+
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev);
@@ -527,6 +563,10 @@ static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
if (vport_num == MLX5_VPORT_UPLINK)
return mlx5_eswitch_uplink_idx(esw);
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
+
+ /* PF and VF vport indices run from 0 to max_vfs */
return vport_num;
}
@@ -540,6 +580,12 @@ static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
if (index == mlx5_eswitch_uplink_idx(esw))
return MLX5_VPORT_UPLINK;
+ /* SF vport indices come after the VFs and before the ECPF */
+ if (mlx5_sf_supported(esw->dev) &&
+ index > mlx5_core_max_vfs(esw->dev))
+ return mlx5_esw_sf_vport_index_to_num(esw, index);
+
+ /* PF and VF vport indices run from 0 to max_vfs */
return index;
}
@@ -625,6 +671,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
for ((vport) = (nvfs); \
(vport) >= (esw)->first_host_vport; (vport)--)
+#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
+ for ((i) = mlx5_esw_sf_start_idx(esw); \
+ (rep) = &(esw)->offloads.vport_reps[(i)], \
+ (i) < mlx5_esw_sf_end_idx(esw); (i++))
+
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -638,6 +689,10 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
@@ -656,6 +711,9 @@ esw_get_max_restore_tag(struct mlx5_eswitch *esw);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);
+
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -667,6 +725,26 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum);
+void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum);
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
+/**
+ * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
+ *
+ * @new_mode: New mode of eswitch.
+ */
+struct mlx5_esw_event_info {
+ u16 new_mode;
+};
+
+int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2f6a0ae20650..7f09f2bbf7c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1800,11 +1800,22 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
+static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int i;
+
+ mlx5_esw_for_each_sf_rep(esw, i, rep)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int i;
+ __unload_reps_sf_vport(esw, rep_type);
+
mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
__esw_offloads_unload_rep(esw, rep, rep_type);
@@ -1822,7 +1833,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -1846,7 +1857,7 @@ err_reps:
return err;
}
-static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2824,3 +2835,35 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum)
+{
+ int err;
+
+ err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
+ if (err)
+ return err;
+
+ err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
+ if (err)
+ goto devlink_err;
+
+ err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ if (err)
+ goto rep_err;
+ return 0;
+
+rep_err:
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+devlink_err:
+ mlx5_esw_vport_disable(esw, vport_num);
+ return err;
+}
+
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 3ce17c3d7a00..d713ae24d6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -23,7 +23,7 @@ static int temp_warn(struct notifier_block *, unsigned long, void *);
static int port_module(struct notifier_block *, unsigned long, void *);
static int pcie_core(struct notifier_block *, unsigned long, void *);
-/* handler which forwards the event to events->nh, driver notifiers */
+/* handler which forwards the event to events->fw_nh, driver notifiers */
static int forward_event(struct notifier_block *, unsigned long, void *);
static struct mlx5_nb events_nbs_ref[] = {
@@ -55,12 +55,14 @@ struct mlx5_events {
struct mlx5_core_dev *dev;
struct workqueue_struct *wq;
struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
- /* driver notifier chain */
- struct atomic_notifier_head nh;
+ /* driver notifier chain for fw events */
+ struct atomic_notifier_head fw_nh;
/* port module events stats */
struct mlx5_pme_stats pme_stats;
/*pcie_core*/
struct work_struct pcie_core_work;
+ /* driver notifier chain for sw events */
+ struct blocking_notifier_head sw_nh;
};
static const char *eqe_type_str(u8 type)
@@ -110,6 +112,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
+ case MLX5_EVENT_TYPE_VHCA_STATE_CHANGE:
+ return "MLX5_EVENT_TYPE_VHCA_STATE_CHANGE";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
@@ -331,7 +335,7 @@ static int forward_event(struct notifier_block *nb, unsigned long event, void *d
mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
eqe_type_str(eqe->type), eqe->sub_type);
- atomic_notifier_call_chain(&events->nh, event, data);
+ atomic_notifier_call_chain(&events->fw_nh, event, data);
return NOTIFY_OK;
}
@@ -342,7 +346,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
if (!events)
return -ENOMEM;
- ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
+ ATOMIC_INIT_NOTIFIER_HEAD(&events->fw_nh);
events->dev = dev;
dev->priv.events = events;
events->wq = create_singlethread_workqueue("mlx5_events");
@@ -351,6 +355,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
return -ENOMEM;
}
INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+ BLOCKING_INIT_NOTIFIER_HEAD(&events->sw_nh);
return 0;
}
@@ -383,11 +388,14 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
flush_workqueue(events->wq);
}
+/* This API is used only for processing and forwarding firmware
+ * events to mlx5 consumers.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
struct mlx5_events *events = dev->priv.events;
- return atomic_notifier_chain_register(&events->nh, nb);
+ return atomic_notifier_chain_register(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_register);
@@ -395,11 +403,41 @@ int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *n
{
struct mlx5_events *events = dev->priv.events;
- return atomic_notifier_chain_unregister(&events->nh, nb);
+ return atomic_notifier_chain_unregister(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_unregister);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
{
- return atomic_notifier_call_chain(&events->nh, event, data);
+ return atomic_notifier_call_chain(&events->fw_nh, event, data);
+}
+
+/* This API is used only for processing and forwarding driver-specific
+ * events to mlx5 consumers.
+ */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_register(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_unregister(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_call_chain(&events->sw_nh, event, data);
+}
+
+void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work)
+{
+ queue_work(dev->priv.events->wq, work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b899539a0786..07b4dbd61920 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -105,8 +105,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 6
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
destroy_ft:
root->cmds->destroy_flow_table(root, ft);
free_ft:
+ rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f3d45ef082cd..83a05371e2aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
struct mlx5_core_dev *tmp_dev;
int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
return;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- tmp_dev = ldev->pf[i].dev;
- if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
- MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (!ldev->pf[i].dev)
break;
- }
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 947f346bdc2d..381325b4a863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -141,9 +141,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
@@ -541,13 +538,13 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_handle *miss_rule;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_table *next_ft;
struct mlx5_flow_table *ft;
- struct prio *prio_s = NULL;
struct fs_chain *chain_s;
struct list_head *pos;
+ struct prio *prio_s;
u32 *flow_group_in;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 3a9fa629503f..d046db7bb047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c08315b51fd3..e4c9627485aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -73,6 +73,9 @@
#include "ecpf.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
+#include "sf/vhca_event.h"
+#include "sf/dev/dev.h"
+#include "sf/sf.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -82,7 +85,6 @@ unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
-#define MLX5_DEFAULT_PROF 2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
@@ -567,6 +569,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);
+ mlx5_vhca_state_cap_handle(dev, set_hca_cap);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -884,6 +888,24 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eswitch_cleanup;
}
+ err = mlx5_vhca_event_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
+ goto err_fpga_cleanup;
+ }
+
+ err = mlx5_sf_hw_table_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
+ goto err_sf_hw_table_cleanup;
+ }
+
+ err = mlx5_sf_table_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init SF table %d\n", err);
+ goto err_sf_table_cleanup;
+ }
+
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -894,6 +916,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
return 0;
+err_sf_table_cleanup:
+ mlx5_sf_hw_table_cleanup(dev);
+err_sf_hw_table_cleanup:
+ mlx5_vhca_event_cleanup(dev);
+err_fpga_cleanup:
+ mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
@@ -925,6 +953,9 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
+ mlx5_sf_table_cleanup(dev);
+ mlx5_sf_hw_table_cleanup(dev);
+ mlx5_vhca_event_cleanup(dev);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_sriov_cleanup(dev);
@@ -1129,6 +1160,14 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov;
}
+ mlx5_vhca_event_start(dev);
+
+ err = mlx5_sf_hw_table_create(dev);
+ if (err) {
+ mlx5_core_err(dev, "sf table create failed %d\n", err);
+ goto err_vhca;
+ }
+
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1141,11 +1180,16 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov;
}
+ mlx5_sf_dev_table_create(dev);
+
return 0;
err_sriov:
mlx5_ec_cleanup(dev);
err_ec:
+ mlx5_sf_hw_table_destroy(dev);
+err_vhca:
+ mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
@@ -1171,8 +1215,11 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_ec_cleanup(dev);
+ mlx5_sf_hw_table_destroy(dev);
+ mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
@@ -1283,7 +1330,7 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
-static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
+int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
int err;
@@ -1305,6 +1352,8 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
+ INIT_LIST_HEAD(&priv->traps);
+
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
@@ -1333,7 +1382,7 @@ err_health_init:
return err;
}
-static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1368,8 +1417,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
dev->priv.adev_idx = mlx5_adev_idx_alloc();
- if (dev->priv.adev_idx < 0)
- return dev->priv.adev_idx;
+ if (dev->priv.adev_idx < 0) {
+ err = dev->priv.adev_idx;
+ goto adev_init_err;
+ }
err = mlx5_mdev_init(dev, prof_sel);
if (err)
@@ -1403,6 +1454,7 @@ pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
mlx5_devlink_free(devlink);
return err;
@@ -1673,6 +1725,10 @@ static int __init init(void)
if (err)
goto err_debug;
+ err = mlx5_sf_driver_register();
+ if (err)
+ goto err_sf;
+
#ifdef CONFIG_MLX5_CORE_EN
err = mlx5e_init();
if (err) {
@@ -1683,6 +1739,8 @@ static int __init init(void)
return 0;
+err_sf:
+ pci_unregister_driver(&mlx5_core_driver);
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -1693,6 +1751,7 @@ static void __exit cleanup(void)
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
#endif
+ mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0a0302ce7144..3754ef98554f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -117,6 +117,8 @@ enum mlx5_semaphore_space_address {
MLX5_SEMAPHORE_SW_RESET = 0x20,
};
+#define MLX5_DEFAULT_PROF 2
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -176,6 +178,7 @@ struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
@@ -257,6 +260,15 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_SF;
+}
+
+int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
+void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
+
+void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index eb956ce904bc..eaa8958e24d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -58,7 +58,7 @@ struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
- u16 func_id;
+ u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
@@ -74,12 +74,17 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
+ return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
if (!root)
return ERR_PTR(-ENOMEM);
- err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+ err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
return root;
}
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct fw_page *tfp;
int i;
- root = page_root_per_func_id(dev, func_id);
+ root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);
@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
nfp->addr = addr;
nfp->page = page;
- nfp->func_id = func_id;
+ nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
- u32 func_id)
+ u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;
@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;
list_for_each_entry(iter, &dev->priv.free_list, list) {
- if (iter->func_id != func_id)
+ if (iter->function != function)
continue;
fp = iter;
}
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
{
struct rb_root *root;
- root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+ root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;
@@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp);
}
-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
list_add(&fwp->list, &dev->priv.free_list);
}
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ map:
goto map;
}
- err = insert_page(dev, addr, page, func_id);
+ err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
for (i = 0; i < npages; i++) {
retry:
- err = alloc_4k(dev, &addr, func_id);
+ err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
- err = alloc_system_page(dev, func_id);
+ err = alloc_system_page(dev, function);
if (err)
goto out_4k;
@@ -384,7 +390,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
@@ -392,14 +398,15 @@ out_free:
return err;
}
-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;
@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
+ bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
+ ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;
@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
return 0;
}
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
for (i = 0; i < num_claimed; i++)
- free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
if (nclaimed)
*nclaimed = num_claimed;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 6fd974920394..a61e09aff152 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -30,6 +30,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table;
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
if (!irq_table)
return -ENOMEM;
@@ -40,6 +43,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
+ if (mlx5_core_is_sf(dev))
+ return;
+
kvfree(dev->priv.irq_table);
}
@@ -268,6 +274,9 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
int nvec;
int err;
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_IRQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
@@ -319,6 +328,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
struct mlx5_irq_table *table = dev->priv.irq_table;
int i;
+ if (mlx5_core_is_sf(dev))
+ return;
+
/* free_irq requires that affinity and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
@@ -332,3 +344,11 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
kfree(table->irq);
}
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->priv.irq_table;
+#endif
+ return dev->priv.irq_table;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
new file mode 100644
index 000000000000..0777be24a307
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "qos.h"
+
+#define MLX5_QOS_DEFAULT_DWRR_UID 0
+
+bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, qos))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_sq_scheduling))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_bw_share))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_rate_limit))
+ return false;
+ return true;
+}
+
+int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
+{
+ return 1 << MLX5_CAP_QOS(mdev, log_max_qos_nic_queue_group);
+}
+
+int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id);
+}
+
+int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ void *attr;
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
+
+ return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id);
+}
+
+int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
+{
+ return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
+}
+
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ u32 bitmask = 0;
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+
+ return mlx5_modify_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id, bitmask);
+}
+
+int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id)
+{
+ return mlx5_destroy_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, id);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
new file mode 100644
index 000000000000..125e4e47e6f7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_QOS_H
+#define __MLX5_QOS_H
+
+#include "mlx5_core.h"
+
+#define MLX5_DEBUG_QOS_MASK BIT(4)
+
+#define qos_err(mdev, fmt, ...) \
+ mlx5_core_err(mdev, "QoS: " fmt, ##__VA_ARGS__)
+#define qos_warn(mdev, fmt, ...) \
+ mlx5_core_warn(mdev, "QoS: " fmt, ##__VA_ARGS__)
+#define qos_dbg(mdev, fmt, ...) \
+ mlx5_core_dbg_mask(mdev, MLX5_DEBUG_QOS_MASK, "QoS: " fmt, ##__VA_ARGS__)
+
+bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev);
+int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
+
+int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id);
+int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id);
+int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+ u32 max_avg_bw, u32 id);
+int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 0fc7de4aa572..8e0dddc6383f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -116,7 +116,7 @@ free:
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
- NULL, NULL, false, 0, 0);
+ NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c
new file mode 100644
index 000000000000..a8d75c2f0275
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "priv.h"
+
+int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_sf_in)] = {};
+
+ MLX5_SET(alloc_sf_in, in, opcode, MLX5_CMD_OP_ALLOC_SF);
+ MLX5_SET(alloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_sf_in)] = {};
+
+ MLX5_SET(dealloc_sf_in, in, opcode, MLX5_CMD_OP_DEALLOC_SF);
+ MLX5_SET(dealloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
+
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ MLX5_SET(enable_hca_in, in, function_id, func_id);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+}
+
+int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
+
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ MLX5_SET(disable_hca_in, in, function_id, func_id);
+ MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
new file mode 100644
index 000000000000..b265f27b2166
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_core.h"
+#include "dev.h"
+#include "sf/vhca_event.h"
+#include "sf/sf.h"
+#include "sf/mlx5_ifc_vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf_dev_table {
+ struct xarray devices;
+ unsigned int max_sfs;
+ phys_addr_t base_address;
+ u64 sf_bar_length;
+ struct notifier_block nb;
+ struct mlx5_core_dev *dev;
+};
+
+static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
+}
+
+bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ if (!mlx5_sf_dev_supported(dev))
+ return false;
+
+ return !xa_empty(&table->devices);
+}
+
+static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum);
+}
+static DEVICE_ATTR_RO(sfnum);
+
+static struct attribute *sf_device_attrs[] = {
+ &dev_attr_sfnum.attr,
+ NULL,
+};
+
+static const struct attribute_group sf_attr_group = {
+ .attrs = sf_device_attrs,
+};
+
+static const struct attribute_group *sf_attr_groups[2] = {
+ &sf_attr_group,
+ NULL
+};
+
+static void mlx5_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = container_of(device, struct auxiliary_device, dev);
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ mlx5_adev_idx_free(adev->id);
+ kfree(sf_dev);
+}
+
+static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev)
+{
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
+
+static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+ struct mlx5_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ int id;
+
+ id = mlx5_adev_idx_alloc();
+ if (id < 0) {
+ err = id;
+ goto add_err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ mlx5_adev_idx_free(id);
+ err = -ENOMEM;
+ goto add_err;
+ }
+ pdev = dev->pdev;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = MLX5_SF_DEV_ID_NAME;
+ sf_dev->adev.dev.release = mlx5_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+ sf_dev->adev.dev.groups = sf_attr_groups;
+ sf_dev->sfnum = sfnum;
+ sf_dev->parent_mdev = dev;
+
+ if (!table->max_sfs) {
+ mlx5_adev_idx_free(id);
+ kfree(sf_dev);
+ err = -EOPNOTSUPP;
+ goto add_err;
+ }
+ sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ mlx5_adev_idx_free(id);
+ kfree(sf_dev);
+ goto add_err;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ put_device(&sf_dev->adev.dev);
+ goto add_err;
+ }
+
+ err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);
+ if (err)
+ goto xa_err;
+ return;
+
+xa_err:
+ mlx5_sf_dev_remove(sf_dev);
+add_err:
+ mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
+ sf_index, sfnum, err);
+}
+
+static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev, u16 sf_index)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ xa_erase(&table->devices, sf_index);
+ mlx5_sf_dev_remove(sf_dev);
+}
+
+static int
+mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
+{
+ struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
+ const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_dev *sf_dev;
+ u16 sf_index;
+
+ sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+ sf_dev = xa_load(&table->devices, sf_index);
+ switch (event->new_vhca_state) {
+ case MLX5_VHCA_STATE_ALLOCATED:
+ if (sf_dev)
+ mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ break;
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ if (sf_dev)
+ mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ else
+ mlx5_core_err(table->dev,
+ "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n",
+ sf_index, event->sw_function_id);
+ break;
+ case MLX5_VHCA_STATE_ACTIVE:
+ if (!sf_dev)
+ mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
+{
+ struct mlx5_core_dev *dev = table->dev;
+ u16 max_functions;
+ u16 function_id;
+ int err = 0;
+ bool ecpu;
+ int i;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ ecpu = mlx5_read_embedded_cpu(dev);
+ /* Arm each vhca context so that firmware generates vhca state events */
+ for (i = 0; i < max_functions; i++) {
+ err = mlx5_vhca_event_arm(dev, function_id, ecpu);
+ if (err)
+ return err;
+
+ function_id++;
+ }
+ return 0;
+}
+
+void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table;
+ unsigned int max_sfs;
+ int err;
+
+ if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ err = -ENOMEM;
+ goto table_err;
+ }
+
+ table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
+ table->dev = dev;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
+ table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
+ table->base_address = pci_resource_start(dev->pdev, 2);
+ table->max_sfs = max_sfs;
+ xa_init(&table->devices);
+ dev->priv.sf_dev_table = table;
+
+ err = mlx5_vhca_event_notifier_register(dev, &table->nb);
+ if (err)
+ goto vhca_err;
+ err = mlx5_sf_dev_vhca_arm_all(table);
+ if (err)
+ goto arm_err;
+ mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
+ return;
+
+arm_err:
+ mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+vhca_err:
+ table->max_sfs = 0;
+ kfree(table);
+ dev->priv.sf_dev_table = NULL;
+table_err:
+ mlx5_core_err(dev, "SF DEV table create err = %d\n", err);
+}
+
+static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
+{
+ struct mlx5_sf_dev *sf_dev;
+ unsigned long index;
+
+ xa_for_each(&table->devices, index, sf_dev) {
+ xa_erase(&table->devices, index);
+ mlx5_sf_dev_remove(sf_dev);
+ }
+}
+
+void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+
+ /* Now that the event handler is not running, it is safe to destroy
+ * the SF devices without racing against it.
+ */
+ mlx5_sf_dev_destroy_all(table);
+
+ WARN_ON(!xa_empty(&table->devices));
+ kfree(table);
+ dev->priv.sf_dev_table = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
new file mode 100644
index 000000000000..4de02902aef1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_DEV_H__
+#define __MLX5_SF_DEV_H__
+
+#ifdef CONFIG_MLX5_SF
+
+#include <linux/auxiliary_bus.h>
+
+#define MLX5_SF_DEV_ID_NAME "sf"
+
+struct mlx5_sf_dev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *parent_mdev;
+ struct mlx5_core_dev *mdev;
+ phys_addr_t bar_base_addr;
+ u32 sfnum;
+};
+
+void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_sf_driver_register(void);
+void mlx5_sf_driver_unregister(void);
+
+bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
+
+#else
+
+static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_driver_register(void)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_driver_unregister(void)
+{
+}
+
+static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
new file mode 100644
index 000000000000..daf63a8115e0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_core.h"
+#include "dev.h"
+#include "devlink.h"
+
+static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = mlx5_devlink_alloc();
+ if (!devlink)
+ return -ENOMEM;
+
+ mdev = devlink_priv(devlink);
+ mdev->device = &adev->dev;
+ mdev->pdev = sf_dev->parent_mdev->pdev;
+ mdev->bar_addr = sf_dev->bar_base_addr;
+ mdev->iseg_base = sf_dev->bar_base_addr;
+ mdev->coredev_type = MLX5_COREDEV_SF;
+ mdev->priv.parent_mdev = sf_dev->parent_mdev;
+ mdev->priv.adev_idx = adev->id;
+ sf_dev->mdev = mdev;
+
+ err = mlx5_mdev_init(mdev, MLX5_DEFAULT_PROF);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_mdev_init on err=%d\n", err);
+ goto mdev_err;
+ }
+
+ mdev->iseg = ioremap(mdev->iseg_base, sizeof(*mdev->iseg));
+ if (!mdev->iseg) {
+ mlx5_core_warn(mdev, "remap error\n");
+ err = -ENOMEM;
+ goto remap_err;
+ }
+
+ err = mlx5_load_one(mdev, true);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err);
+ goto load_one_err;
+ }
+ return 0;
+
+load_one_err:
+ iounmap(mdev->iseg);
+remap_err:
+ mlx5_mdev_uninit(mdev);
+mdev_err:
+ mlx5_devlink_free(devlink);
+ return err;
+}
+
+static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->mdev);
+ mlx5_unload_one(sf_dev->mdev, true);
+ iounmap(sf_dev->mdev->iseg);
+ mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_devlink_free(devlink);
+}
+
+static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ mlx5_unload_one(sf_dev->mdev, false);
+}
+
+static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
+ { .name = MLX5_ADEV_NAME "." MLX5_SF_DEV_ID_NAME, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5_sf_dev_id_table);
+
+static struct auxiliary_driver mlx5_sf_driver = {
+ .name = MLX5_SF_DEV_ID_NAME,
+ .probe = mlx5_sf_dev_probe,
+ .remove = mlx5_sf_dev_remove,
+ .shutdown = mlx5_sf_dev_shutdown,
+ .id_table = mlx5_sf_dev_id_table,
+};
+
+int mlx5_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&mlx5_sf_driver);
+}
+
+void mlx5_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlx5_sf_driver);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
new file mode 100644
index 000000000000..c2ba41bb7a70
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "eswitch.h"
+#include "priv.h"
+#include "sf/dev/dev.h"
+#include "mlx5_ifc_vhca_event.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf {
+ struct devlink_port dl_port;
+ unsigned int port_index;
+ u16 id;
+ u16 hw_fn_id;
+ u16 hw_state;
+};
+
+struct mlx5_sf_table {
+ struct mlx5_core_dev *dev; /* To refer from notifier context. */
+ struct xarray port_indices; /* port index based lookup. */
+ refcount_t refcount;
+ struct completion disable_complete;
+ struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
+ struct notifier_block esw_nb;
+ struct notifier_block vhca_nb;
+ u8 ecpu: 1;
+};
+
+static struct mlx5_sf *
+mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
+{
+ return xa_load(&table->port_indices, port_index);
+}
+
+static struct mlx5_sf *
+mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
+{
+ unsigned long index;
+ struct mlx5_sf *sf;
+
+ xa_for_each(&table->port_indices, index, sf) {
+ if (sf->hw_fn_id == fn_id)
+ return sf;
+ }
+ return NULL;
+}
+
+static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
+}
+
+static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ xa_erase(&table->port_indices, sf->port_index);
+}
+
+static struct mlx5_sf *
+mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
+{
+ unsigned int dl_port_index;
+ struct mlx5_sf *sf;
+ u16 hw_fn_id;
+ int id_err;
+ int err;
+
+ id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
+ if (id_err < 0) {
+ err = id_err;
+ goto id_err;
+ }
+
+ sf = kzalloc(sizeof(*sf), GFP_KERNEL);
+ if (!sf) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ sf->id = id_err;
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
+ sf->port_index = dl_port_index;
+ sf->hw_fn_id = hw_fn_id;
+ sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
+
+ err = mlx5_sf_id_insert(table, sf);
+ if (err)
+ goto insert_err;
+
+ return sf;
+
+insert_err:
+ kfree(sf);
+alloc_err:
+ mlx5_sf_hw_table_sf_free(table->dev, id_err);
+id_err:
+ if (err == -EEXIST)
+ NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
+ return ERR_PTR(err);
+}
+
+static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ mlx5_sf_id_erase(table, sf);
+ mlx5_sf_hw_table_sf_free(table->dev, sf->id);
+ kfree(sf);
+}
+
+static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return NULL;
+
+ return refcount_inc_not_zero(&table->refcount) ? table : NULL;
+}
+
+static void mlx5_sf_table_put(struct mlx5_sf_table *table)
+{
+ if (refcount_dec_and_test(&table->refcount))
+ complete(&table->disable_complete);
+}
+
+static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
+{
+ switch (hw_state) {
+ case MLX5_VHCA_STATE_ACTIVE:
+ case MLX5_VHCA_STATE_IN_USE:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ return DEVLINK_PORT_FN_STATE_ACTIVE;
+ case MLX5_VHCA_STATE_INVALID:
+ case MLX5_VHCA_STATE_ALLOCATED:
+ default:
+ return DEVLINK_PORT_FN_STATE_INACTIVE;
+ }
+}
+
+static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
+{
+ switch (hw_state) {
+ case MLX5_VHCA_STATE_IN_USE:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ case MLX5_VHCA_STATE_INVALID:
+ case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_ACTIVE:
+ default:
+ return DEVLINK_PORT_FN_OPSTATE_DETACHED;
+ }
+}
+
+static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
+{
+ return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
+}
+
+int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err = 0;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table)
+ return -EOPNOTSUPP;
+
+ sf = mlx5_sf_lookup_by_index(table, dl_port->index);
+ if (!sf) {
+ err = -EOPNOTSUPP;
+ goto sf_err;
+ }
+ mutex_lock(&table->sf_state_lock);
+ *state = mlx5_sf_to_devlink_state(sf->hw_state);
+ *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
+ mutex_unlock(&table->sf_state_lock);
+sf_err:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+{
+ int err;
+
+ if (mlx5_sf_is_active(sf))
+ return 0;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
+ return -EINVAL;
+
+ err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
+ if (err)
+ return err;
+
+ sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
+ return 0;
+}
+
+static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+{
+ int err;
+
+ if (!mlx5_sf_is_active(sf))
+ return 0;
+
+ err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
+ if (err)
+ return err;
+
+ sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
+ return 0;
+}
+
+static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ struct mlx5_sf *sf,
+ enum devlink_port_fn_state state)
+{
+ int err = 0;
+
+ mutex_lock(&table->sf_state_lock);
+ if (state == mlx5_sf_to_devlink_state(sf->hw_state))
+ goto out;
+ if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
+ err = mlx5_sf_activate(dev, sf);
+ else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
+ err = mlx5_sf_deactivate(dev, sf);
+ else
+ err = -EINVAL;
+out:
+ mutex_unlock(&table->sf_state_lock);
+ return err;
+}
+
+int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ sf = mlx5_sf_lookup_by_index(table, dl_port->index);
+ if (!sf) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = mlx5_sf_state_set(dev, table, sf, state);
+out:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_sf *sf;
+ u16 hw_fn_id;
+ int err;
+
+ sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
+ if (IS_ERR(sf))
+ return PTR_ERR(sf);
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id);
+ err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum);
+ if (err)
+ goto esw_err;
+ *new_port_index = sf->port_index;
+ return 0;
+
+esw_err:
+ mlx5_sf_free(table, sf);
+ return err;
+}
+
+static int
+mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+ if (!new_attr->sfnum_valid) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "User must provide unique sfnum. Driver does not support auto assignment");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->controller_valid && new_attr->controller) {
+ NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int mlx5_devlink_sf_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ int err;
+
+ err = mlx5_sf_new_check_attr(dev, new_attr, extack);
+ if (err)
+ return err;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
+ mlx5_sf_free(table, sf);
+ } else if (mlx5_sf_is_active(sf)) {
+ /* Even if the SF is active, treat it as in use: by the time it is
+ * disabled here, a driver may already have attached to it. Wait for
+ * the vhca state event so the SF id is recycled only after firmware
+ * confirms that the driver has detached.
+ */
+ mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ kfree(sf);
+ } else {
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ kfree(sf);
+ }
+}
+
+int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err = 0;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ sf = mlx5_sf_lookup_by_index(table, port_index);
+ if (!sf) {
+ err = -ENODEV;
+ goto sf_err;
+ }
+
+ mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_sf_id_erase(table, sf);
+
+ mutex_lock(&table->sf_state_lock);
+ mlx5_sf_dealloc(table, sf);
+ mutex_unlock(&table->sf_state_lock);
+sf_err:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
+{
+ if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
+ return true;
+
+ if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
+ return true;
+
+ if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
+ new_state == MLX5_VHCA_STATE_ALLOCATED)
+ return true;
+
+ return false;
+}
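The helper above whitelists exactly three transitions: attach (ACTIVE to IN_USE), detach (IN_USE to ACTIVE) and teardown completion (TEARDOWN_REQUEST to ALLOCATED). An equivalent, purely illustrative data-driven form:

/* Illustrative rewrite of the if-chain above; not part of the patch. */
static const struct {
	u8 from;
	u8 to;
} sf_allowed_transitions[] = {
	{ MLX5_VHCA_STATE_ACTIVE,           MLX5_VHCA_STATE_IN_USE },
	{ MLX5_VHCA_STATE_IN_USE,           MLX5_VHCA_STATE_ACTIVE },
	{ MLX5_VHCA_STATE_TEARDOWN_REQUEST, MLX5_VHCA_STATE_ALLOCATED },
};

static bool sf_state_update_check_sketch(const struct mlx5_sf *sf, u8 new_state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sf_allowed_transitions); i++)
		if (sf->hw_state == sf_allowed_transitions[i].from &&
		    new_state == sf_allowed_transitions[i].to)
			return true;
	return false;
}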
+
+static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
+{
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
+ const struct mlx5_vhca_state_event *event = data;
+ bool update = false;
+ struct mlx5_sf *sf;
+
+ table = mlx5_sf_table_try_get(table->dev);
+ if (!table)
+ return 0;
+
+ mutex_lock(&table->sf_state_lock);
+ sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
+ if (!sf)
+ goto sf_err;
+
+ /* When a driver is attached to or detached from a function, an
+ * event notifies such a state change.
+ */
+ update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
+ if (update)
+ sf->hw_state = event->new_vhca_state;
+sf_err:
+ mutex_unlock(&table->sf_state_lock);
+ mlx5_sf_table_put(table);
+ return 0;
+}
+
+static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
+{
+ if (!mlx5_sf_max_functions(table->dev))
+ return;
+
+ init_completion(&table->disable_complete);
+ refcount_set(&table->refcount, 1);
+}
+
+static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
+{
+ struct mlx5_eswitch *esw = table->dev->priv.eswitch;
+ unsigned long index;
+ struct mlx5_sf *sf;
+
+ /* At this point, no new user commands can start and no vhca event can
+ * arrive. It is safe to destroy all user created SFs.
+ */
+ xa_for_each(&table->port_indices, index, sf) {
+ mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_sf_id_erase(table, sf);
+ mlx5_sf_dealloc(table, sf);
+ }
+}
+
+static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
+{
+ if (!mlx5_sf_max_functions(table->dev))
+ return;
+
+ if (!refcount_read(&table->refcount))
+ return;
+
+ /* Balances with refcount_set; drop the reference so that no new user
+ * command can start and no new vhca event handler can run.
+ */
+ mlx5_sf_table_put(table);
+ wait_for_completion(&table->disable_complete);
+
+ mlx5_sf_deactivate_all(table);
+}
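mlx5_sf_table_try_get() and mlx5_sf_table_put() are defined earlier in this file, above the portion quoted here. Based on the enable/disable logic above, they plausibly look like the following (an assumption, shown for the refcount-plus-completion quiescing pattern):

/* Plausible shape of the helpers used above: try_get fails once disable
 * has dropped the base reference, and the final put signals
 * disable_complete so the disabler may proceed to deactivate all SFs.
 */
static struct mlx5dr_sf_table_sketch; /* placeholder to keep names hypothetical */
static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return NULL;

	return refcount_inc_not_zero(&table->refcount) ? table : NULL;
}

static void mlx5_sf_table_put(struct mlx5_sf_table *table)
{
	if (refcount_dec_and_test(&table->refcount))
		complete(&table->disable_complete);
}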
+
+static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
+ const struct mlx5_esw_event_info *mode = data;
+
+ switch (mode->new_mode) {
+ case MLX5_ESWITCH_OFFLOADS:
+ mlx5_sf_table_enable(table);
+ break;
+ case MLX5_ESWITCH_NONE:
+ mlx5_sf_table_disable(table);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+}
+
+int mlx5_sf_table_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table;
+ int err;
+
+ if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ mutex_init(&table->sf_state_lock);
+ table->dev = dev;
+ xa_init(&table->port_indices);
+ dev->priv.sf_table = table;
+ refcount_set(&table->refcount, 0);
+ table->esw_nb.notifier_call = mlx5_sf_esw_event;
+ err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
+ if (err)
+ goto reg_err;
+
+ table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
+ err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+ if (err)
+ goto vhca_err;
+
+ return 0;
+
+vhca_err:
+ mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
+reg_err:
+ mutex_destroy(&table->sf_state_lock);
+ kfree(table);
+ dev->priv.sf_table = NULL;
+ return err;
+}
+
+void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
+ WARN_ON(refcount_read(&table->refcount));
+ mutex_destroy(&table->sf_state_lock);
+ WARN_ON(!xa_empty(&table->port_indices));
+ kfree(table);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
new file mode 100644
index 000000000000..58b6be0b03d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+#include <linux/mlx5/driver.h>
+#include "vhca_event.h"
+#include "priv.h"
+#include "sf.h"
+#include "mlx5_ifc_vhca_event.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf_hw {
+ u32 usr_sfnum;
+ u8 allocated: 1;
+ u8 pending_delete: 1;
+};
+
+struct mlx5_sf_hw_table {
+ struct mlx5_core_dev *dev;
+ struct mlx5_sf_hw *sfs;
+ int max_local_functions;
+ u8 ecpu: 1;
+ struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
+ struct notifier_block vhca_nb;
+};
+
+u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id)
+{
+ return sw_id + mlx5_sf_start_function_id(dev);
+}
+
+static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id)
+{
+ return hw_id - mlx5_sf_start_function_id(dev);
+}
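Both helpers are plain offset arithmetic around the sf_base_id capability returned by mlx5_sf_start_function_id(); a worked example with a hypothetical base:

/* Worked example (hypothetical sf_base_id = 256):
 *   mlx5_sf_sw_to_hw_id(dev, 3)   -> 256 + 3 = 259
 *   mlx5_sf_hw_to_sw_id(dev, 259) -> 259 - 256 = 3
 */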
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ int sw_id = -ENOSPC;
+ u16 hw_fn_id;
+ int err;
+ int i;
+
+ if (!table->max_local_functions)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&table->table_lock);
+ /* Check whether an SF with the same sfnum already exists. */
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) {
+ err = -EEXIST;
+ goto exist_err;
+ }
+ }
+
+ /* Find a free entry and allocate it from the array. */
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (!table->sfs[i].allocated) {
+ table->sfs[i].usr_sfnum = usr_sfnum;
+ table->sfs[i].allocated = true;
+ sw_id = i;
+ break;
+ }
+ }
+ if (sw_id == -ENOSPC) {
+ err = -ENOSPC;
+ /* No entry was allocated (i == max_local_functions here), so skip
+ * the sfs[i] rollback at the err label to avoid an out-of-bounds
+ * write.
+ */
+ goto exist_err;
+ }
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
+ err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id);
+ if (err)
+ goto err;
+
+ err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
+ if (err)
+ goto vhca_err;
+
+ mutex_unlock(&table->table_lock);
+ return sw_id;
+
+vhca_err:
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+err:
+ table->sfs[i].allocated = false;
+exist_err:
+ mutex_unlock(&table->table_lock);
+ return err;
+}
+
+static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u16 hw_fn_id;
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id);
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ table->sfs[id].allocated = false;
+ table->sfs[id].pending_delete = false;
+}
+
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ mutex_lock(&table->table_lock);
+ _mlx5_sf_hw_id_free(dev, id);
+ mutex_unlock(&table->table_lock);
+}
+
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ u16 hw_fn_id;
+ u8 state;
+ int err;
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
+ mutex_lock(&table->table_lock);
+ err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
+ if (err)
+ goto err;
+ state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
+ if (state == MLX5_VHCA_STATE_ALLOCATED) {
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ table->sfs[id].allocated = false;
+ } else {
+ table->sfs[id].pending_delete = true;
+ }
+err:
+ mutex_unlock(&table->table_lock);
+}
+
+static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table)
+{
+ int i;
+
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (table->sfs[i].allocated)
+ _mlx5_sf_hw_id_free(table->dev, i);
+ }
+}
+
+int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table;
+ struct mlx5_sf_hw *sfs;
+ int max_functions;
+
+ if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return 0;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
+ if (!sfs)
+ goto table_err;
+
+ mutex_init(&table->table_lock);
+ table->dev = dev;
+ table->sfs = sfs;
+ table->max_local_functions = max_functions;
+ table->ecpu = mlx5_read_embedded_cpu(dev);
+ dev->priv.sf_hw_table = table;
+ mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
+ return 0;
+
+table_err:
+ kfree(table);
+ return -ENOMEM;
+}
+
+void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return;
+
+ mutex_destroy(&table->table_lock);
+ kfree(table->sfs);
+ kfree(table);
+}
+
+static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
+{
+ struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
+ const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_hw *sf_hw;
+ u16 sw_id;
+
+ if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
+ return 0;
+
+ sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id);
+ sf_hw = &table->sfs[sw_id];
+
+ mutex_lock(&table->table_lock);
+ /* Firmware has notified that the SF driver is finally detached.
+ * Hence recycle the SF hardware id for reuse.
+ */
+ if (sf_hw->allocated && sf_hw->pending_delete)
+ _mlx5_sf_hw_id_free(table->dev, sw_id);
+ mutex_unlock(&table->table_lock);
+ return 0;
+}
+
+int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return 0;
+
+ table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
+ return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+}
+
+void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ /* Dealloc any SFs whose firmware deletion event was missed. */
+ mlx5_sf_hw_dealloc_all(table);
+}
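Taken together, the deferred-free path and the VHCA event handler in this file cooperate roughly as follows (a descriptive sketch of the sequence implemented above):

/* Deferred-free timeline (descriptive sketch):
 * 1. The user deletes an SF whose VHCA state has progressed past
 *    ALLOCATED (a driver is, or was, attached).
 * 2. mlx5_sf_hw_table_sf_deferred_free() queries the current state; it
 *    can only mark pending_delete = true and must keep the entry.
 * 3. The SF driver detaches; firmware raises a VHCA state change event
 *    with the new state ALLOCATED.
 * 4. mlx5_sf_hw_vhca_event() observes allocated && pending_delete and
 *    calls _mlx5_sf_hw_id_free(), recycling the hardware function id.
 */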
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
new file mode 100644
index 000000000000..1daf5a122ba3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_IFC_VHCA_EVENT_H__
+#define __MLX5_IFC_VHCA_EVENT_H__
+
+enum mlx5_ifc_vhca_state {
+ MLX5_VHCA_STATE_INVALID = 0x0,
+ MLX5_VHCA_STATE_ALLOCATED = 0x1,
+ MLX5_VHCA_STATE_ACTIVE = 0x2,
+ MLX5_VHCA_STATE_IN_USE = 0x3,
+ MLX5_VHCA_STATE_TEARDOWN_REQUEST = 0x4,
+};
+
+struct mlx5_ifc_vhca_state_context_bits {
+ u8 arm_change_event[0x1];
+ u8 reserved_at_1[0xb];
+ u8 vhca_state[0x4];
+ u8 reserved_at_10[0x10];
+
+ u8 sw_function_id[0x20];
+
+ u8 reserved_at_40[0x80];
+};
+
+struct mlx5_ifc_query_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
+};
+
+struct mlx5_ifc_query_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_vhca_state_field_select_bits {
+ u8 reserved_at_0[0x1e];
+ u8 sw_function_id[0x1];
+ u8 arm_change_event[0x1];
+};
+
+struct mlx5_ifc_modify_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ struct mlx5_ifc_vhca_state_field_select_bits vhca_state_field_select;
+
+ struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
+};
+
+#endif
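Each "u8 name[0xN]" member in these _bits structs describes an N-bit big-endian field in the PRM command layout; the structs are never accessed directly but consumed through the mlx5 MLX5_SET/MLX5_GET accessors. A minimal sketch of reading the 4-bit vhca_state field from a query response, using only calls already declared in this series:

/* Sketch: how callers read fields out of a query_vhca_state response.
 * MLX5_ST_SZ_DW() sizes the buffer from the _bits layout; MLX5_GET()
 * computes the bit offset and performs the big-endian conversion.
 */
static u8 sketch_read_vhca_state(struct mlx5_core_dev *dev, u16 fn_id, bool ecpu)
{
	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};

	if (mlx5_cmd_query_vhca_state(dev, fn_id, ecpu, out, sizeof(out)))
		return MLX5_VHCA_STATE_INVALID;
	return MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
}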
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
new file mode 100644
index 000000000000..cb02a51d0986
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_PRIV_H__
+#define __MLX5_SF_PRIV_H__
+
+#include <linux/mlx5/driver.h>
+
+int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id);
+int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
+
+int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+
+u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
new file mode 100644
index 000000000000..0b6aea1e6a94
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_H__
+#define __MLX5_SF_H__
+
+#include <linux/mlx5/driver.h>
+
+static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf_base_id);
+}
+
+#ifdef CONFIG_MLX5_SF
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf);
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ if (!mlx5_sf_supported(dev))
+ return 0;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ return MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ return 1 << MLX5_CAP_GEN(dev, log_max_sf);
+}
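The fallback order matters here: the explicit max_num_sf capability wins, and only a zero value falls back to the log2 capability. For example (hypothetical capability values):

/* Worked example (hypothetical caps):
 *   max_num_sf = 100                -> 100 SFs
 *   max_num_sf = 0, log_max_sf = 8  -> 1 << 8 = 256 SFs
 */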
+
+#else
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_MLX5_SF_MANAGER
+
+int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
+
+int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+
+int mlx5_devlink_sf_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *add_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index);
+int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
+#else
+
+static inline int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
new file mode 100644
index 000000000000..af2f2dd9db25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_ifc_vhca_event.h"
+#include "mlx5_core.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_vhca_state_notifier {
+ struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
+ struct blocking_notifier_head n_head;
+};
+
+struct mlx5_vhca_event_work {
+ struct work_struct work;
+ struct mlx5_vhca_state_notifier *notifier;
+ struct mlx5_vhca_state_event event;
+};
+
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *out, u32 outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
+
+ MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
+ MLX5_SET(query_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *in, u32 inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
+ MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
+ MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);
+
+ return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
+}
+
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);
+
+ return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
+}
+
+static void
+mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *event)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ int err;
+
+ err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
+ if (err)
+ return;
+
+ event->sw_function_id = MLX5_GET(query_vhca_state_out, out,
+ vhca_state_context.sw_function_id);
+ event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
+ vhca_state_context.vhca_state);
+
+ mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);
+
+ blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
+}
+
+static void mlx5_vhca_state_work_handler(struct work_struct *_work)
+{
+ struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
+ struct mlx5_vhca_state_notifier *notifier = work->notifier;
+ struct mlx5_core_dev *dev = notifier->dev;
+
+ mlx5_vhca_event_notify(dev, &work->event);
+}
+
+static int
+mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_vhca_state_notifier *notifier =
+ mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
+ struct mlx5_vhca_event_work *work;
+ struct mlx5_eqe *eqe = data;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+ INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
+ work->notifier = notifier;
+ work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
+ work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
+ mlx5_events_work_enqueue(notifier->dev, &work->work);
+ return NOTIFY_OK;
+}
+
+void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
+{
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ MLX5_SET(cmd_hca_cap, set_hca_cap, vhca_state, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_allocated, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_active, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_in_use, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
+}
+
+int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!mlx5_vhca_event_supported(dev))
+ return 0;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ dev->priv.vhca_state_notifier = notifier;
+ notifier->dev = dev;
+ BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
+ MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
+ return 0;
+}
+
+void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ kfree(dev->priv.vhca_state_notifier);
+ dev->priv.vhca_state_notifier = NULL;
+}
+
+void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!dev->priv.vhca_state_notifier)
+ return;
+
+ notifier = dev->priv.vhca_state_notifier;
+ mlx5_eq_notifier_register(dev, &notifier->nb);
+}
+
+void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!dev->priv.vhca_state_notifier)
+ return;
+
+ notifier = dev->priv.vhca_state_notifier;
+ mlx5_eq_notifier_unregister(dev, &notifier->nb);
+}
+
+int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ if (!dev->priv.vhca_state_notifier)
+ return -EOPNOTSUPP;
+ return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
+}
+
+void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
+}
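Consumers (such as the SF tables earlier in this series) subscribe through mlx5_vhca_event_notifier_register(). Because the EQ handler above defers to a workqueue, the blocking chain runs callbacks in process context. A minimal hypothetical consumer (the callback name and its body are illustration only):

/* Hypothetical consumer of the VHCA state chain; not part of the patch. */
static int my_vhca_event_cb(struct notifier_block *nb, unsigned long opcode,
			    void *data)
{
	const struct mlx5_vhca_state_event *event = data;

	pr_debug("fn %u sw_fn %u -> state %u\n", event->function_id,
		 event->sw_function_id, event->new_vhca_state);
	return 0;
}

/* Registration, e.g. from a driver init path:
 *   my_nb.notifier_call = my_vhca_event_cb;
 *   err = mlx5_vhca_event_notifier_register(dev, &my_nb);
 */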
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
new file mode 100644
index 000000000000..1fe1ec6f4d4b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_VHCA_EVENT_H__
+#define __MLX5_VHCA_EVENT_H__
+
+#ifdef CONFIG_MLX5_SF
+
+struct mlx5_vhca_state_event {
+ u16 function_id;
+ u16 sw_function_id;
+ u8 new_vhca_state;
+ bool ecpu;
+};
+
+static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN_MAX(dev, vhca_state);
+}
+
+void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
+int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
+int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *out, u32 outlen);
+#else
+
+static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
+{
+}
+
+static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index df1363a34a42..27c2b8416d02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -218,158 +218,6 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
};
-struct dr_action_modify_field_conv {
- u16 hw_field;
- u8 start;
- u8 end;
- u8 l3_type;
- u8 l4_type;
-};
-
-static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
- },
-};
-
-#define MAX_VLANS 2
-struct dr_action_vlan_info {
- int count;
- u32 headers[MAX_VLANS];
-};
-
-struct dr_action_apply_attr {
- u32 modify_index;
- u16 modify_actions;
- u32 decap_index;
- u16 decap_actions;
- u8 decap_with_vlan:1;
- u64 final_icm_addr;
- u32 flow_tag;
- u32 ctr_id;
- u16 gvmi;
- u16 hit_gvmi;
- u32 reformat_id;
- u32 reformat_size;
- struct dr_action_vlan_info vlans;
-};
-
static int
dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
enum mlx5dr_action_type *action_type)
@@ -394,141 +242,6 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
return 0;
}
-static void dr_actions_init_next_ste(u8 **last_ste,
- u32 *added_stes,
- enum mlx5dr_ste_entry_type entry_type,
- u16 gvmi)
-{
- (*added_stes)++;
- *last_ste += DR_STE_SIZE;
- mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
-}
-
-static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
-
- /* We want to make sure the modify header comes before L2
- * encapsulation. The reason for that is that we support
- * modify headers for outer headers only
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_push_vlan(last_ste,
- attr->vlans.headers[i],
- encap);
- }
- }
-
- if (encap) {
- /* Modify header and encapsulation require a different STEs.
- * Since modify header STE format doesn't support encapsulation
- * tunneling_action.
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
- action_type_set[DR_ACTION_TYP_PUSH_VLAN])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_encap(last_ste,
- attr->reformat_id,
- attr->reformat_size,
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
- /* Whenever prio_tag_required enabled, we can be sure that the
- * previous table (ACL) already push vlan to our packet,
- * And due to HW limitation we need to set this bit, otherwise
- * push vlan + reformat will not work.
- */
- if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
- mlx5dr_ste_set_go_back_bit(last_ste);
- }
-
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-}
-
-static void dr_actions_apply_rx(u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-
- if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->decap_actions,
- attr->decap_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
- mlx5dr_ste_set_rx_decap(last_ste);
-
- if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i ||
- action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
- action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_set_rx_pop_vlan(last_ste);
- }
- }
-
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_MODIFY_PKT,
- attr->gvmi);
- else
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
-
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TAG]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
- }
-}
-
/* Apply the actions on the rule STE array starting from the last_ste.
* Actions might require more than one STE, new_num_stes will return
* the new size of the STEs array, rule with actions.
@@ -537,21 +250,20 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
enum mlx5dr_ste_entry_type ste_type,
u8 *action_type_set,
u8 *last_ste,
- struct dr_action_apply_attr *attr,
+ struct mlx5dr_ste_actions_attr *attr,
u32 *new_num_stes)
{
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
u32 added_stes = 0;
if (ste_type == MLX5DR_STE_TYPE_RX)
- dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
else
- dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
- last_ste += added_stes * DR_STE_SIZE;
*new_num_stes += added_stes;
-
- mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
- mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
static enum dr_action_domain
@@ -643,9 +355,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 action_type_set[DR_ACTION_TYP_MAX] = {};
+ struct mlx5dr_ste_actions_attr attr = {};
struct mlx5dr_action *dest_action = NULL;
u32 state = DR_ACTION_STATE_NO_ACTION;
- struct dr_action_apply_attr attr = {};
enum dr_action_domain action_domain;
bool recalc_cs_required = false;
u8 *last_ste;
@@ -756,12 +468,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
break;
case DR_ACTION_TYP_POP_VLAN:
- max_actions_type = MAX_VLANS;
+ max_actions_type = MLX5DR_MAX_VLANS;
attr.vlans.count++;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- max_actions_type = MAX_VLANS;
- if (attr.vlans.count == MAX_VLANS)
+ max_actions_type = MLX5DR_MAX_VLANS;
+ if (attr.vlans.count == MLX5DR_MAX_VLANS)
return -EINVAL;
attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
@@ -817,132 +529,6 @@ out_invalid_arg:
return -EINVAL;
}
-#define CVLAN_ETHERTYPE 0x8100
-#define SVLAN_ETHERTYPE 0x88a8
-#define HDR_LEN_L2_ONLY 14
-#define HDR_LEN_L2_VLAN 18
-#define REWRITE_HW_ACTION_NUM 6
-
-static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
- struct mlx5dr_action *action,
- void *data, size_t data_sz)
-{
- struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
- u64 ops[REWRITE_HW_ACTION_NUM] = {};
- u32 hdr_fld_4b;
- u16 hdr_fld_2b;
- u16 vlan_type;
- bool vlan;
- int i = 0;
- int ret;
-
- vlan = (data_sz != HDR_LEN_L2_ONLY);
-
- /* dmac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* smac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
- MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* dmac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- /* ethertype + (optional) vlan */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 32);
- if (!vlan) {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
- } else {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
- hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
- }
- i++;
-
- /* smac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- if (vlan) {
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- i++;
- }
-
- action->rewrite.data = (void *)ops;
- action->rewrite.num_of_actions = i;
-
- ret = mlx5dr_send_postsend_action(dmn, action);
- if (ret) {
- mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
- return ret;
- }
-
- return 0;
-}
-
static struct mlx5dr_action *
dr_action_create_generic(enum mlx5dr_action_type action_type)
{
@@ -1217,21 +803,34 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
- /* Only Ethernet frame is supported, with VLAN (18) or without (14) */
- if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
- return -EINVAL;
+ u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
+ int ret;
+
+ ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
+ data, data_sz,
+ hw_actions,
+ ACTION_CACHE_LINE_SIZE,
+ &action->rewrite.num_of_actions);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
+ return ret;
+ }
action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
DR_CHUNK_SIZE_8);
- if (!action->rewrite.chunk)
+ if (!action->rewrite.chunk) {
+ mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
return -ENOMEM;
+ }
+ action->rewrite.data = (void *)hw_actions;
action->rewrite.index = (action->rewrite.chunk->icm_addr -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
- ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
+ ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
+ mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
mlx5dr_icm_free_chunk(action->rewrite.chunk);
return ret;
}
@@ -1243,6 +842,9 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
}
+#define CVLAN_ETHERTYPE 0x8100
+#define SVLAN_ETHERTYPE 0x88a8
+
struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
{
return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
@@ -1315,31 +917,13 @@ dec_ref:
return NULL;
}
-static const struct dr_action_modify_field_conv *
-dr_action_modify_get_hw_info(u16 sw_field)
-{
- const struct dr_action_modify_field_conv *hw_action_info;
-
- if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
- goto not_found;
-
- hw_action_info = &dr_action_conv_arr[sw_field];
- if (!hw_action_info->end && !hw_action_info->start)
- goto not_found;
-
- return hw_action_info;
-
-not_found:
- return NULL;
-}
-
static int
dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 max_length;
u16 sw_field;
u32 data;
@@ -1349,7 +933,7 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
@@ -1357,20 +941,12 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
max_length = hw_action_info->end - hw_action_info->start + 1;
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start);
-
- /* PRM defines that length zero specific length of 32bits */
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- max_length == 32 ? 0 : max_length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_add(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start,
+ max_length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1381,9 +957,9 @@ static int
dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 offset, length, max_length;
u16 sw_field;
u32 data;
@@ -1395,7 +971,7 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
return -EINVAL;
@@ -1411,19 +987,12 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start + offset);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_set(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start + offset,
+ length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1434,12 +1003,12 @@ static int
dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
u16 src_field, dst_field;
/* Get SW modify action data */
@@ -1450,8 +1019,8 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
length = MLX5_GET(copy_action_in, sw_action, length);
/* Convert SW data to HW modify action format */
- hw_src_action_info = dr_action_modify_get_hw_info(src_field);
- hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
+ hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
+ hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
if (!hw_src_action_info || !hw_dst_action_info) {
mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
return -EINVAL;
@@ -1471,23 +1040,13 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_copy, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
- hw_dst_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
- hw_dst_action_info->start + dst_offset);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
- hw_src_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
- hw_src_action_info->start + dst_offset);
+ mlx5dr_ste_set_action_copy(dmn->ste_ctx,
+ hw_action,
+ hw_dst_action_info->hw_field,
+ hw_dst_action_info->start + dst_offset,
+ length,
+ hw_src_action_info->hw_field,
+ hw_src_action_info->start + src_offset);
*ret_dst_hw_info = hw_dst_action_info;
*ret_src_hw_info = hw_src_action_info;
@@ -1499,8 +1058,8 @@ static int
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 action;
int ret;
@@ -1677,15 +1236,15 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 *num_hw_actions,
bool *modify_ttl)
{
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
- u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
- u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
- u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
+ u16 hw_field = 0;
+ u32 l3_type = 0;
+ u32 l4_type = 0;
*modify_ttl = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index aa2c2d6c44e6..47ec88964bf3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -57,6 +57,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
int ret;
+ dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
+ if (!dmn->ste_ctx) {
+ mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 6527eb4df153..e3a002983c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -221,6 +221,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_match_param mask = {};
struct mlx5dr_ste_build *sb;
bool inner, rx;
@@ -259,80 +260,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
- mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
- mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
- mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
- mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
- dmn, inner, rx);
+ mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
+ &mask, dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
- mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
else if (dr_mask_is_tnl_geneve(&mask, dmn))
- mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_icmp(&mask, dmn)) {
- ret = mlx5dr_ste_build_icmp(&sb[idx++],
+ ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (ret)
return ret;
}
if (dr_mask_is_tnl_gre_set(&mask.misc))
- mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Inner */
@@ -343,50 +353,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = true;
if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
- mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
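
The dr_matcher.c hunks above are the consumer side of this refactor: every mlx5dr_ste_build_*() helper gains the domain's struct mlx5dr_ste_ctx as a new first argument, so STE-format-specific work can be dispatched through per-version callbacks instead of hard-coded MLX5_SET() accesses. Condensed into a minimal sketch (the callback names and struct layout here are illustrative, not the driver's exact definitions), the pattern looks like this:

	/* Sketch only: per-format hooks hang off a context struct, and the
	 * exported builders become thin wrappers that dispatch through it.
	 */
	struct mlx5dr_ste_ctx {
		void (*build_mpls_init)(struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask);
		void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
		/* ... one hook per STE operation ... */
	};

	void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   bool inner, bool rx)
	{
		sb->rx = rx;
		sb->inner = inner;
		ste_ctx->build_mpls_init(sb, mask);	/* format-specific body */
	}

The dr_ste.c hunks further down show the real exported wrappers taking exactly this shape.
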
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6d73719db1f4..ddcb7017e121 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
-static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
+static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
struct list_head *send_list)
{
@@ -25,7 +26,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
if (!ste_info_last)
return -ENOMEM;
- mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
+ mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
@@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
@@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->ste_arr;
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -169,6 +172,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste *col_ste,
u8 *hw_ste)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste *new_ste;
int ret;
@@ -180,11 +184,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
- ret = dr_rule_append_to_miss_list(new_ste,
+ ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
mlx5dr_ste_get_miss_list(col_ste),
update_list);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
+ mlx5dr_dbg(dmn, "Failed update dup entry\n");
goto err_exit;
}
@@ -224,6 +228,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *new_htbl,
struct list_head *update_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
@@ -237,7 +242,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
/* Copy STE control and tag */
memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
@@ -253,7 +259,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_ste,
hw_ste);
if (!new_ste) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
+ mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
new_idx);
return NULL;
}
@@ -391,7 +397,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
new_htbl,
formatted_ste,
@@ -436,13 +443,15 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
* (48B len) which works only on first 32B
*/
- mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
+ mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
+ prev_htbl->ste_arr[0].hw_ste,
new_htbl->chunk->icm_addr,
new_htbl->chunk->num_of_entries);
ste_to_update = &prev_htbl->ste_arr[0];
} else {
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ cur_htbl->pointing_ste->hw_ste,
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
@@ -496,6 +505,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info;
struct mlx5dr_ste *new_ste;
@@ -507,8 +518,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
goto free_send_info;
- if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
+ if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
+ miss_list, send_list)) {
+ mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
goto err_exit;
}
@@ -659,6 +671,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
u8 num_of_builders = nic_matcher->num_of_builders;
struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 *curr_hw_ste, *prev_hw_ste;
struct mlx5dr_ste *action_ste;
int i, k, ret;
@@ -692,10 +705,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
goto err_exit;
/* Point current ste to the new action */
- mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ prev_hw_ste,
+ action_ste->htbl);
ret = dr_rule_add_member(nic_rule, action_ste);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
+ mlx5dr_dbg(dmn, "Failed adding rule member\n");
goto free_ste_info;
}
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
@@ -722,6 +737,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
/* Take ref on table, only on first time this ste is used */
@@ -730,7 +746,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
ste->ste_chain_location = ste_location;
@@ -743,7 +760,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
ste,
hw_ste,
DR_CHUNK_SIZE_1)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ mlx5dr_dbg(dmn, "Failed allocating table\n");
goto clean_ste_info;
}
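
On the rule path just rewritten, the recurring caller-side idiom is to resolve the context once from the domain at function entry (struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;) and thread it through every STE manipulation. A condensed, hypothetical caller (not a function from the driver) showing the before/after:

	static void example_point_to_anchor(struct mlx5dr_matcher *matcher,
					    u8 *hw_ste, u64 anchor_icm_addr)
	{
		struct mlx5dr_domain *dmn = matcher->tbl->dmn;
		struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;

		/* was: mlx5dr_ste_set_miss_addr(hw_ste, anchor_icm_addr); */
		mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, anchor_icm_addr);
	}

Caching dmn in a local is also what lets the error paths shorten mlx5dr_dbg(matcher->tbl->dmn, ...) to mlx5dr_dbg(dmn, ...), which is why those debug lines change alongside the ste_ctx plumbing.
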
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index d275823bff2f..1614481fdf8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -3,104 +3,7 @@
#include <linux/types.h>
#include <linux/crc32.h>
-#include "dr_types.h"
-
-#define DR_STE_CRC_POLY 0xEDB88320L
-#define STE_IPV4 0x1
-#define STE_IPV6 0x2
-#define STE_TCP 0x1
-#define STE_UDP 0x2
-#define STE_SPI 0x3
-#define IP_VERSION_IPV4 0x4
-#define IP_VERSION_IPV6 0x6
-#define STE_SVLAN 0x1
-#define STE_CVLAN 0x2
-
-#define DR_STE_ENABLE_FLOW_TAG BIT(31)
-
-/* Set to STE a specific value using DR_STE_SET */
-#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
- if ((spec)->s_fname) { \
- MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
- (spec)->s_fname = 0; \
- } \
-} while (0)
-
-/* Set to STE spec->s_fname to tag->t_fname */
-#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
-
-/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */
-#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
-
-/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */
-#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
-
-#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
- MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
-} while (0)
-
-#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
- (_misc)->outer_first_mpls_over_gre_label || \
- (_misc)->outer_first_mpls_over_gre_exp || \
- (_misc)->outer_first_mpls_over_gre_s_bos || \
- (_misc)->outer_first_mpls_over_gre_ttl)
-#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
- (_misc)->outer_first_mpls_over_udp_label || \
- (_misc)->outer_first_mpls_over_udp_exp || \
- (_misc)->outer_first_mpls_over_udp_s_bos || \
- (_misc)->outer_first_mpls_over_udp_ttl)
-
-#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
- ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
- (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
- MLX5DR_STE_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_tunl_action {
- DR_STE_TUNL_ACTION_NONE = 0,
- DR_STE_TUNL_ACTION_ENABLE = 1,
- DR_STE_TUNL_ACTION_DECAP = 2,
- DR_STE_TUNL_ACTION_L3_DECAP = 3,
- DR_STE_TUNL_ACTION_POP_VLAN = 4,
-};
-
-enum dr_ste_action_type {
- DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
- DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
- DR_STE_ACTION_TYPE_ENCAP = 4,
-};
+#include "dr_ste.h"
struct dr_hw_ste_format {
u8 ctrl[DR_STE_SIZE_CTRL];
@@ -142,7 +45,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
return index;
}
-static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
u16 byte_mask = 0;
int i;
@@ -155,7 +58,7 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
-static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
+static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -169,104 +72,6 @@ void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
- DR_STE_ENABLE_FLOW_TAG | flow_tag);
-}
-
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
-{
- /* This can be used for both rx_steering_mult and for sx_transmit */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
-}
-
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
-}
-
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
- bool go_back)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- DR_STE_ACTION_TYPE_PUSH_VLAN);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
- /* Due to HW limitation we need to set this bit, otherwise reformat +
- * push vlan will not work.
- */
- if (go_back)
- mlx5dr_ste_set_go_back_bit(hw_ste_p);
-}
-
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
- /* The hardware expects here size in words (2 byte) */
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
-}
-
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_DECAP);
-}
-
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_POP_VLAN);
-}
-
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_L3_DECAP);
- MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
-}
-
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
-}
-
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
-{
- return MLX5_GET(ste_general, hw_ste_p, entry_type);
-}
-
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index)
-{
- MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
- num_of_actions);
- MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
- re_write_index);
-}
-
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
-}
-
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
- u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
- MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
- MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
-
- /* Set GVMI once, this is the same for RX/TX
- * bits 63_48 of next table base / miss address encode the next GVMI
- */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
-}
-
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
@@ -279,21 +84,26 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u64 miss_addr)
{
- u64 index =
- (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
- MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
-
- return index << 6;
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
+static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste, u64 miss_addr)
{
- u64 index = (icm_addr >> 5) | ht_size;
+ u8 *hw_ste_p = ste->hw_ste;
- MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
- MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
+ ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+}
+
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+ ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
@@ -317,15 +127,16 @@ struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
return &ste->htbl->miss_list[index];
}
-static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
u8 *hw_ste = ste->hw_ste;
- MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
- MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
+ ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
@@ -363,7 +174,8 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
/* Free ste which is the head and the only one in miss_list */
static void
-dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
+dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
@@ -380,7 +192,7 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
*/
memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
+ dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -436,7 +248,8 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Free ste that is located in the middle of the miss list:
* |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
*/
-static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
+static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_send_info *ste_info,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
@@ -448,8 +261,8 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
if (WARN_ON(!prev_ste))
return;
- miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
- mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
+ miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
+ ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
prev_ste->hw_ste, ste_info,
@@ -467,6 +280,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
{
struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info ste_info_head;
struct mlx5dr_ste *next_ste, *first_ste;
bool put_on_origin_table = true;
@@ -495,7 +309,8 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
if (!next_ste) {
/* One and only entry in the list */
- dr_ste_remove_head_ste(ste, nic_matcher,
+ dr_ste_remove_head_ste(ste_ctx, ste,
+ nic_matcher,
&ste_info_head,
&send_ste_list,
stats_tbl);
@@ -506,7 +321,9 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
put_on_origin_table = false;
}
} else { /* Ste in the middle of the list */
- dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
+ dr_ste_remove_middle_ste(ste_ctx, ste,
+ &ste_info_head, &send_ste_list,
+ stats_tbl);
}
/* Update HW */
@@ -530,34 +347,18 @@ bool mlx5dr_ste_equal_tag(void *src, void *dst)
return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
-}
-
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
-{
- u64 index = miss_addr >> 6;
-
- /* Miss address for TX and RX STEs located in the same offsets */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
-}
-
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
-{
- u8 *hw_ste = ste->hw_ste;
-
- MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
- mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
- dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
/* Init one ste as a pattern for ste data array */
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
@@ -565,13 +366,13 @@ void mlx5dr_ste_set_formatted_ste(u16 gvmi,
{
struct mlx5dr_ste ste = {};
- mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+ ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
ste.hw_ste = formatted_ste;
if (connect_info->type == CONNECT_HIT)
- dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
+ dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
else
- mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
+ dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -582,7 +383,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
{
u8 formatted_ste[DR_STE_SIZE] = {};
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
htbl,
formatted_ste,
@@ -597,18 +399,18 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
u8 *cur_hw_ste,
enum mlx5dr_icm_chunk_size log_table_size)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
- u8 next_lu_type;
+ u16 next_lu_type;
u16 byte_mask;
- next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
- byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
+ next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
+ byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
@@ -628,7 +430,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
goto free_table;
}
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
+ cur_hw_ste, next_htbl);
ste->next_htbl = next_htbl;
next_htbl->pointing_ste = ste;
}
@@ -657,7 +460,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask)
+ u16 lu_type, u16 byte_mask)
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
@@ -709,6 +512,92 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
return 0;
}
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
+{
+ const struct mlx5dr_ste_action_modify_field *hw_field;
+
+ if (sw_field >= ste_ctx->modify_field_arr_sz)
+ return NULL;
+
+ hw_field = &ste_ctx->modify_field_arr[sw_field];
+ if (!hw_field->end && !hw_field->start)
+ return NULL;
+
+ return hw_field;
+}
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_set((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_add((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ ste_ctx->set_action_copy((u8 *)hw_action,
+ dst_hw_field, dst_shifter, dst_len,
+ src_hw_field, src_shifter);
+}
+
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ /* Only Ethernet frame is supported, with VLAN (18) or without (14) */
+ if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
+ return -EINVAL;
+
+ return ste_ctx->set_action_decap_l3_list(data, data_sz,
+ hw_action, hw_action_sz,
+ used_hw_action_num);
+}
+
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
@@ -738,6 +627,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_build *sb;
int ret, i;
@@ -748,14 +638,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
sb = nic_matcher->ste_builder;
for (i = 0; i < nic_matcher->num_of_builders; i++) {
- mlx5dr_ste_init(ste_arr,
- sb->lu_type,
- nic_dmn->ste_type,
- dmn->info.caps.gvmi);
+ ste_ctx->ste_init(ste_arr,
+ sb->lu_type,
+ nic_dmn->ste_type,
+ dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
- ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
+ ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@@ -765,45 +655,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
* not relevant for the last ste in the chain.
*/
sb++;
- MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
- MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
+ ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
+ ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
}
ste_arr += DR_STE_SIZE;
}
return 0;
}
-static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- if (mask->smac_47_16 || mask->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
- mask->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
- mask->smac_47_16 << 16 | mask->smac_15_0);
- mask->smac_47_16 = 0;
- mask->smac_15_0 = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
-
- if (mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- } else if (mask->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->svlan_tag = 0;
- }
-}
-
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
@@ -1045,566 +904,93 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
}
-static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- if (spec->smac_47_16 || spec->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
- spec->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
- spec->smac_47_16 << 16 | spec->smac_15_0);
- spec->smac_47_16 = 0;
- spec->smac_15_0 = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
-}
-
-static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
-
- return 0;
+ ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
-}
-
-static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
- bool inner,
- u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_address, mask, dst_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_address, mask, src_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- ecn, mask, ip_ecn);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
- mask->tcp_flags = 0;
- }
-}
-
-static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
-}
-
-static void
-dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_mask->inner_second_cvlan_tag ||
- misc_mask->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->inner_second_cvlan_tag = 0;
- misc_mask->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, inner_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, inner_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, inner_second_prio);
- } else {
- if (misc_mask->outer_second_cvlan_tag ||
- misc_mask->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->outer_second_cvlan_tag = 0;
- misc_mask->outer_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, outer_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, outer_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, outer_second_prio);
- }
-}
-
-static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
- bool inner, u8 *tag)
-{
- struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_spec = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_spec->inner_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->inner_second_cvlan_tag = 0;
- } else if (misc_spec->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
- } else {
- if (misc_spec->outer_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->outer_second_cvlan_tag = 0;
- } else if (misc_spec->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->outer_second_svlan_tag = 0;
- }
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
- }
-
- return 0;
-}
-
-static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
- DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+ ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
-}
-
-static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+ ste_ctx->build_eth_l2_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
-}
-
-static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask,
- l2_tunneling_network_id, (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
+ ste_ctx->build_eth_l2_dst_init(sb, mask);
}
-static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
- (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, bool inner, bool rx)
{
- dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
-}
-
-static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
-
- return 0;
+ ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
-}
-
-static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
- mask->tcp_flags = 0;
- }
+ ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
-static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
- DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
+ ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
@@ -1622,653 +1008,110 @@ void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}
-static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (inner)
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
- else
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
-}
-
-static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (sb->inner)
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
- else
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
-
- return 0;
-}
-
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
-}
-
-static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
+ ste_ctx->build_mpls_init(sb, mask);
}
-static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
-
- DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
- DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
- DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
-
- DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
-
- DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask, bool inner, bool rx)
-{
- dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_gre_tag;
-}
-
-static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
-}
-
-static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
- return 0;
+ ste_ctx->build_tnl_gre_init(sb, mask);
}
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
-}
-
-#define ICMP_TYPE_OFFSET_FIRST_DW 24
-#define ICMP_CODE_OFFSET_FIRST_DW 16
-#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
-
-static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- u8 *bit_mask)
-{
- bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
- struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
- u32 icmp_header_data_mask;
- u32 icmp_type_mask;
- u32 icmp_code_mask;
- int dw0_location;
- int dw1_location;
-
- if (is_ipv4_mask) {
- icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
- icmp_type_mask = misc_3_mask->icmpv4_type;
- icmp_code_mask = misc_3_mask->icmpv4_code;
- dw0_location = caps->flex_parser_id_icmp_dw0;
- dw1_location = caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
- icmp_type_mask = misc_3_mask->icmpv6_type;
- icmp_code_mask = misc_3_mask->icmpv6_code;
- dw0_location = caps->flex_parser_id_icmpv6_dw0;
- dw1_location = caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_type = 0;
- else
- misc_3_mask->icmpv6_type = 0;
- }
- if (icmp_code_mask) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_code = 0;
- else
- misc_3_mask->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
- (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_header_data = 0;
- else
- misc_3_mask->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ ste_ctx->build_tnl_mpls_init(sb, mask);
}
-static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
- u32 icmp_header_data;
- int dw0_location;
- int dw1_location;
- u32 icmp_type;
- u32 icmp_code;
- bool is_ipv4;
-
- is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
- if (is_ipv4) {
- icmp_header_data = misc_3->icmpv4_header_data;
- icmp_type = misc_3->icmpv4_type;
- icmp_code = misc_3->icmpv4_code;
- dw0_location = sb->caps->flex_parser_id_icmp_dw0;
- dw1_location = sb->caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data = misc_3->icmpv6_header_data;
- icmp_type = misc_3->icmpv6_type;
- icmp_code = misc_3->icmpv6_code;
- dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
- dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_type = 0;
- else
- misc_3->icmpv6_type = 0;
- }
-
- if (icmp_code) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_code = 0;
- else
- misc_3->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
- (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4)
- misc_3->icmpv4_header_data = 0;
- else
- misc_3->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
- int ret;
-
- ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
- if (ret)
- return ret;
-
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
-
- return 0;
-}
-
-static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(general_purpose, bit_mask,
- general_purpose_lookup_field, misc_2_mask,
- metadata_reg_a);
-}
-
-static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
- misc_2_mask, metadata_reg_a);
-
- return 0;
+ return ste_ctx->build_icmp_init(sb, mask);
}
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
-}
-
-static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- if (inner) {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- inner_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- inner_tcp_ack_num);
- } else {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- outer_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- outer_tcp_ack_num);
- }
+ ste_ctx->build_general_purpose_init(sb, mask);
}
-static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- if (sb->inner) {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
- } else {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
+ ste_ctx->build_eth_l4_misc_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_flags,
- misc_3_mask, outer_vxlan_gpe_flags);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_next_protocol,
- misc_3_mask, outer_vxlan_gpe_next_protocol);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_vni,
- misc_3_mask, outer_vxlan_gpe_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_flags, misc3,
- outer_vxlan_gpe_flags);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_next_protocol, misc3,
- outer_vxlan_gpe_next_protocol);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_vni, misc3,
- outer_vxlan_gpe_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
- sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+ ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_protocol_type,
- misc_mask, geneve_protocol_type);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_oam,
- misc_mask, geneve_oam);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_opt_len,
- misc_mask, geneve_opt_len);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_vni,
- misc_mask, geneve_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_protocol_type, misc, geneve_protocol_type);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_oam, misc, geneve_oam);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_opt_len, misc, geneve_opt_len);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_vni, misc, geneve_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
+ ste_ctx->build_tnl_geneve_init(sb, mask);
}
-static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
- misc_2_mask, metadata_reg_c_0);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
- misc_2_mask, metadata_reg_c_1);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
- misc_2_mask, metadata_reg_c_2);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
- misc_2_mask, metadata_reg_c_3);
-}
-
-static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
- DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
- DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
- DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
-}
-
-static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
- misc_2_mask, metadata_reg_c_4);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
- misc_2_mask, metadata_reg_c_5);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
- misc_2_mask, metadata_reg_c_6);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
- misc_2_mask, metadata_reg_c_7);
+ ste_ctx->build_register_0_init(sb, mask);
}
-static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
- DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
- DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
- DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
+ ste_ctx->build_register_1_init(sb, mask);
}
-static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
- misc_mask->source_eswitch_owner_vhca_id = 0;
-}
-
-static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
- struct mlx5dr_cmd_vport_cap *vport_cap;
- struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
- u8 *bit_mask = sb->bit_mask;
- bool source_gvmi_set;
-
- DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
-
- if (sb->vhca_id_valid) {
- /* Find port GVMI based on the eswitch_owner_vhca_id */
- if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
- else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
- dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
- else
- return -EINVAL;
- } else {
- caps = &dmn->info.caps;
- }
-
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
- if (!vport_cap)
- return -EINVAL;
-
- source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
- if (vport_cap->vport_gvmi && source_gvmi_set)
- MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
-
- misc->source_eswitch_owner_vhca_id = 0;
- misc->source_port = 0;
-
- return 0;
-}
-
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx)
@@ -2276,12 +1119,21 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
- dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->dmn = dmn;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
+ ste_ctx->build_src_gvmi_qpn_init(sb, mask);
+}
+
+static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
+ [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
+ [MLX5_STEERING_FORMAT_CONNECTX_6DX] = NULL,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
+{
+ if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
+ return NULL;
+
+ return mlx5dr_ste_ctx_arr[version];
}
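
The table above turns the former direct calls into a per-format ops lookup: each steering format gets one context, a NULL slot marks a format that is defined but not yet implemented, and out-of-range versions fail the bounds check. A minimal standalone sketch of the same dispatch pattern, using hypothetical names (fmt_ops, fmt_get_ops) rather than the driver's:

    #include <stddef.h>

    struct fmt_ops {
            void (*build)(int arg);         /* one slot per supported operation */
    };

    static void v0_build(int arg) { (void)arg; /* version-specific work */ }

    static const struct fmt_ops ops_v0 = { .build = v0_build };

    /* NULL marks a known-but-unsupported format, like CONNECTX_6DX above */
    static const struct fmt_ops *fmt_ops_arr[] = {
            [0] = &ops_v0,
            [1] = NULL,
    };

    static const struct fmt_ops *fmt_get_ops(unsigned int version)
    {
            if (version >= sizeof(fmt_ops_arr) / sizeof(fmt_ops_arr[0]))
                    return NULL;            /* out-of-range versions fail cleanly */
            return fmt_ops_arr[version];
    }
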
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
new file mode 100644
index 000000000000..4a3d6a849991
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef _DR_STE_
+#define _DR_STE_
+
+#include "dr_types.h"
+
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_SPI 0x3
+#define IP_VERSION_IPV4 0x4
+#define IP_VERSION_IPV6 0x6
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define HDR_LEN_L2_MACS 0xC
+#define HDR_LEN_L2_VLAN 0x4
+#define HDR_LEN_L2_ETHER 0x2
+#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
+#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
+
+/* Set a specific value to the STE tag field and mark spec->s_fname as used */
+#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
+ if ((spec)->s_fname) { \
+ MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
+ (spec)->s_fname = 0; \
+ } \
+} while (0)
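
DR_STE_SET_VAL implements a consume-as-you-copy contract: once a field is written into the STE, it is zeroed in the spec, so whatever is still nonzero after all builders have run is a match criterion nothing consumed. A minimal sketch of the same contract, with hypothetical struct names:

    struct sample_spec { unsigned int vid; unsigned int prio; };
    struct sample_tag  { unsigned int vid; unsigned int prio; };

    /* Copy the field into the tag, then zero it in the spec so leftover
     * nonzero fields can later be detected as unconsumed criteria.
     */
    #define SET_VAL(tagp, t_field, specp, s_field, value) do {      \
            if ((specp)->s_field) {                                 \
                    (tagp)->t_field = (value);                      \
                    (specp)->s_field = 0;                           \
            }                                                       \
    } while (0)

Usage mirrors the driver's macro: SET_VAL(&tag, vid, &spec, vid, spec.vid) writes the value only when the spec asked for it.
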
+
+/* Copy spec->s_fname to tag->t_fname and mark spec->s_fname as used */
+#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
+
+/* Set tag->t_fname to all ones (-1) and mark spec->s_fname as used */
+#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
+
+#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
+} while (0)
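
The macro above unpacks the 9-bit tcp_flags word into per-flag STE fields, NS at bit 8 down to FIN at bit 0. A standalone check of that bit layout (flag order taken from the macro; the sample value is arbitrary):

    #include <stdio.h>

    int main(void)
    {
            unsigned int tcp_flags = 0x018; /* PSH | ACK */
            const char *names[9] = { "fin", "syn", "rst", "psh", "ack",
                                     "urg", "ece", "cwr", "ns" };
            int i;

            for (i = 8; i >= 0; i--)        /* same bit order the macro tests */
                    printf("%s=%u\n", names[i], !!(tcp_flags & (1u << i)));
            return 0;
    }
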
+
+#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
+ struct mlx5dr_match_misc2 *_mask = mask; \
+ u8 *_tag = tag; \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
+
+enum dr_ste_action_modify_type_l3 {
+ DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
+};
+
+enum dr_ste_action_modify_type_l4 {
+ DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
+};
+
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
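
The tag builders first render the user mask into bit_mask, then compress it into the 16-bit byte_mask the lookup hardware consumes; the implementation stays in dr_ste.c and is not part of this diff. A hypothetical sketch of one plausible compression, assuming one output bit per fully-set mask byte:

    /* Sketch only: the real criterion and mask width live in dr_ste.c */
    static unsigned short conv_bit_to_byte_mask(const unsigned char *bit_mask,
                                                int len)
    {
            unsigned short byte_mask = 0;
            int i;

            for (i = 0; i < len; i++) {
                    byte_mask <<= 1;
                    if (bit_mask[i] == 0xff)        /* byte fully matched */
                            byte_mask |= 1;
            }
            return byte_mask;
    }
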
+
+#define DR_STE_CTX_BUILDER(fname) \
+ ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
+ struct mlx5dr_match_param *mask))
+
+struct mlx5dr_ste_ctx {
+ /* Builders */
+ void DR_STE_CTX_BUILDER(eth_l2_src_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
+ void DR_STE_CTX_BUILDER(eth_l2_src);
+ void DR_STE_CTX_BUILDER(eth_l2_dst);
+ void DR_STE_CTX_BUILDER(eth_l2_tnl);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
+ void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
+ void DR_STE_CTX_BUILDER(mpls);
+ void DR_STE_CTX_BUILDER(tnl_gre);
+ void DR_STE_CTX_BUILDER(tnl_mpls);
+ int DR_STE_CTX_BUILDER(icmp);
+ void DR_STE_CTX_BUILDER(general_purpose);
+ void DR_STE_CTX_BUILDER(eth_l4_misc);
+ void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
+ void DR_STE_CTX_BUILDER(tnl_geneve);
+ void DR_STE_CTX_BUILDER(register_0);
+ void DR_STE_CTX_BUILDER(register_1);
+ void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+
+ /* Getters and Setters */
+ void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi);
+ void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
+ u16 (*get_next_lu_type)(u8 *hw_ste_p);
+ void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
+ u64 (*get_miss_addr)(u8 *hw_ste_p);
+ void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+ void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
+ u16 (*get_byte_mask)(u8 *hw_ste_p);
+
+ /* Actions */
+ void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ u32 modify_field_arr_sz;
+ const struct mlx5dr_ste_action_modify_field *modify_field_arr;
+ void (*set_action_set)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_add)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_copy)(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+ int (*set_action_decap_l3_list)(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+};
+
+extern struct mlx5dr_ste_ctx ste_ctx_v0;
+
+#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
new file mode 100644
index 000000000000..b76fdff08890
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -0,0 +1,1640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include "dr_ste.h"
+
+#define SVLAN_ETHERTYPE 0x88a8
+#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+
+enum dr_ste_v0_action_tunl {
+ DR_STE_TUNL_ACTION_NONE = 0,
+ DR_STE_TUNL_ACTION_ENABLE = 1,
+ DR_STE_TUNL_ACTION_DECAP = 2,
+ DR_STE_TUNL_ACTION_L3_DECAP = 3,
+ DR_STE_TUNL_ACTION_POP_VLAN = 4,
+};
+
+enum dr_ste_v0_action_type {
+ DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
+ DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
+ DR_STE_ACTION_TYPE_ENCAP = 4,
+};
+
+enum dr_ste_v0_action_mdfy_op {
+ DR_STE_ACTION_MDFY_OP_COPY = 0x1,
+ DR_STE_ACTION_MDFY_OP_SET = 0x2,
+ DR_STE_ACTION_MDFY_OP_ADD = 0x3,
+};
+
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+ ((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
+ (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
+ DR_STE_V0_LU_TYPE_##lookup_type##_O)
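
DR_STE_CALC_LU_TYPE pastes _I, _D or _O onto the lookup-type name: the inner-header variant wins first, then the RX variant, otherwise the outer/TX one. The same selection spelled out, with illustrative values borrowed from the ETHL2_DST entries below:

    enum { LU_FOO_O = 0x06, LU_FOO_I = 0x07, LU_FOO_D = 0x1b };

    static int calc_lu_type(int rx, int inner)
    {
            if (inner)
                    return LU_FOO_I;        /* inner header variant wins first */
            if (rx)
                    return LU_FOO_D;        /* then the RX variant */
            return LU_FOO_O;                /* otherwise the outer/TX variant */
    }
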
+
+enum {
+ DR_STE_V0_LU_TYPE_NOP = 0x00,
+ DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
+ DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_O = 0x06,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_I = 0x07,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_D = 0x1b,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_O = 0x08,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_I = 0x09,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_D = 0x1c,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
+ DR_STE_V0_LU_TYPE_ETHL4_O = 0x13,
+ DR_STE_V0_LU_TYPE_ETHL4_I = 0x14,
+ DR_STE_V0_LU_TYPE_ETHL4_D = 0x21,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_O = 0x2c,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_I = 0x2d,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_D = 0x2e,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_O = 0x15,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_I = 0x24,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_D = 0x25,
+ DR_STE_V0_LU_TYPE_GRE = 0x16,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0 = 0x22,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 = 0x23,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
+ DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum {
+ DR_STE_V0_ACTION_MDFY_FLD_L2_0 = 0,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_1 = 1,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_2 = 2,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_0 = 3,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_1 = 4,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_2 = 5,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_3 = 6,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_4 = 7,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_0 = 8,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_1 = 9,
+ DR_STE_V0_ACTION_MDFY_FLD_MPLS = 10,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0 = 11,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_0 = 12,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_1 = 13,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_2 = 14,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_3 = 15,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_2 = 16,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_0 = 17,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_1 = 18,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_2 = 19,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_3 = 20,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1 = 21,
+ DR_STE_V0_ACTION_MDFY_FLD_METADATA = 22,
+ DR_STE_V0_ACTION_MDFY_FLD_RESERVED = 23,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
+ },
+};
+
+static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
+static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+ /* The miss address for TX and RX STEs is located at the same offsets */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
+
+static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
+{
+ u64 index =
+ (MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
+ MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32) << 26);
+
+ return index << 6;
+}
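
The miss address is stored as a 64-byte-aligned index split across two fields: the low 26 bits (address bits 31:6) and the next 8 bits (address bits 39:32). A round-trip check of that packing, assuming a 64-byte-aligned address below 2^40:

    #include <assert.h>
    #include <stdint.h>

    struct miss_fields { uint32_t addr_31_6; uint8_t addr_39_32; };

    int main(void)
    {
            uint64_t miss_addr = 0x12345678c0ULL;   /* sample 64B-aligned address */
            uint64_t index = miss_addr >> 6;
            struct miss_fields f = {
                    .addr_31_6  = index & ((1u << 26) - 1), /* address bits 31:6 */
                    .addr_39_32 = index >> 26,              /* address bits 39:32 */
            };
            uint64_t back = ((uint64_t)f.addr_39_32 << 26 | f.addr_31_6) << 6;

            assert(back == miss_addr);      /* round-trips for sub-40-bit addresses */
            return 0;
    }
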
+
+static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+ MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
+}
+
+static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, byte_mask);
+}
+
+static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+}
+
+static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
+}
+
+static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
+}
+
+static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
+}
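
The hit address follows the same scheme at 32-byte granularity, with the hash-table size OR'ed into the index before it is split. The packing as standalone arithmetic (field semantics inferred from the setter above):

    #include <stdint.h>

    static void split_hit_addr(uint64_t icm_addr, uint32_t ht_size,
                               uint32_t *base_39_32_size, uint32_t *base_31_5_size)
    {
            /* 32-byte-aligned ICM address; ht_size lands in the free low bits */
            uint64_t index = (icm_addr >> 5) | ht_size;

            *base_39_32_size = index >> 27;                 /* upper field */
            *base_31_5_size  = index & ((1u << 27) - 1);    /* lower 27 bits */
    }
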
+
+static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi)
+{
+ dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
+ dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
+ dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+ /* Set GVMI once - this is the same for RX/TX.
+ * Bits 63_48 of the next table base / miss address encode the next GVMI.
+ */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+ DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+ /* This can be used for both rx_steering_mult and sx_transmit */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+ bool go_back)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ DR_STE_ACTION_TYPE_PUSH_VLAN);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+ /* Due to a HW limitation we need to set this bit, otherwise reformat +
+ * push vlan will not work.
+ */
+ if (go_back)
+ dr_ste_v0_set_go_back_bit(hw_ste_p);
+}
+
+static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
+ int size, bool encap_l3)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+ /* The hardware expects the size here in 2-byte words */
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_DECAP);
+}
+
+static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_L3_DECAP);
+ MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+}
+
+static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+ num_of_actions);
+ MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+ re_write_index);
+}
+
+static void dr_ste_v0_arr_init_next(u8 **last_ste,
+ u32 *added_stes,
+ enum mlx5dr_ste_entry_type entry_type,
+ u16 gvmi)
+{
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
+ entry_type, gvmi);
+}
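
When two actions cannot share one STE, the helpers above append a fresh don't-care STE and bump added_stes so the caller knows the chain grew. The same grow-in-place pattern in miniature, with a hypothetical entry size and a plain zero-init standing in for dr_ste_v0_init():

    #include <string.h>

    #define ENTRY_SIZE 64   /* stand-in for DR_STE_SIZE */

    /* Advance to a fresh follow-up entry and tell the caller the chain grew */
    static void arr_init_next(unsigned char **last, unsigned int *added)
    {
            (*added)++;
            *last += ENTRY_SIZE;
            memset(*last, 0, ENTRY_SIZE);   /* real code re-inits lu_type/gvmi */
    }
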
+
+static void
+dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
+
+ /* We want to make sure the modify header comes before L2
+ * encapsulation, because we support modify headers for
+ * outer headers only.
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_push_vlan(last_ste,
+ attr->vlans.headers[i],
+ encap);
+ }
+ }
+
+ if (encap) {
+ /* Modify header and encapsulation require different STEs,
+ * since the modify header STE format doesn't support the
+ * encapsulation tunneling_action.
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
+ action_type_set[DR_ACTION_TYP_PUSH_VLAN])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_encap(last_ste,
+ attr->reformat_id,
+ attr->reformat_size,
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
+ /* Whenever prio_tag_required is enabled, we can be sure that the
+ * previous table (ACL) has already pushed a vlan onto our packet,
+ * and due to a HW limitation we need to set this bit, otherwise
+ * push vlan + reformat will not work.
+ */
+ if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
+ dr_ste_v0_set_go_back_bit(last_ste);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void
+dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->decap_actions,
+ attr->decap_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
+ dr_ste_v0_set_rx_decap(last_ste);
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i ||
+ action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
+ action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_set_rx_pop_vlan(last_ste);
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_MODIFY_PKT,
+ attr->gvmi);
+ else
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
+ }
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void dr_ste_v0_set_action_set(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
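
The `length = (length == 32) ? 0 : length` line suggests a wrap-around encoding: the destination_length field appears too narrow to hold 32, so 0 is taken to mean a full 32-bit write. A sketch of the encode/decode pair under that assumption (not confirmed by this diff):

    /* Assumed encoding: a narrow length field where 0 means "all 32 bits" */
    static unsigned int encode_len(unsigned int length)
    {
            return (length == 32) ? 0 : length;     /* 32 wraps to 0 */
    }

    static unsigned int decode_len(unsigned int field)
    {
            return field ? field : 32;              /* 0 decodes back to 32 */
    }
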
+
+static void dr_ste_v0_set_action_add(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+static void dr_ste_v0_set_action_copy(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
+}
+
+#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5
+
+static int
+dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
+ u32 hw_action_num;
+ int required_actions;
+ u32 hdr_fld_4b;
+ u16 hdr_fld_2b;
+ u16 vlan_type;
+ bool vlan;
+
+ vlan = (data_sz != HDR_LEN_L2);
+ hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
+ required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;
+
+ if (hw_action_num < required_actions)
+ return -ENOMEM;
+
+ /* dmac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 16);
+ hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
+ hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
+ MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* dmac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* ethertype + (optional) vlan */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 32);
+ if (!vlan) {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
+ } else {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
+ hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
+ }
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ if (vlan) {
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ }
+
+ *used_hw_action_num = required_actions;
+
+ return 0;
+}
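
Before emitting anything, the function converts the caller's buffer size into an action count and bails out if the rewrite list cannot fit: five SET actions for an untagged L2 header, one more when a VLAN is present. The capacity check in isolation, with a stand-in action size:

    #include <errno.h>

    #define ACTION_SZ   8   /* stand-in for MLX5_ST_SZ_BYTES(dr_action_hw_set) */
    #define MIN_ACTIONS 5   /* DR_STE_DECAP_L3_MIN_ACTION_NUM */

    /* Returns the required action count, or -ENOMEM if it cannot fit */
    static int check_decap_capacity(unsigned int hw_action_sz, int has_vlan)
    {
            unsigned int avail = hw_action_sz / ACTION_SZ;
            unsigned int required = MIN_ACTIONS + !!has_vlan;

            return (avail < required) ? -ENOMEM : (int)required;
    }
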
+
+static void
+dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ if (mask->smac_47_16 || mask->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+ mask->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+ mask->smac_47_16 << 16 | mask->smac_15_0);
+ mask->smac_47_16 = 0;
+ mask->smac_15_0 = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ if (spec->smac_47_16 || spec->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+ spec->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+ spec->smac_47_16 << 16 | spec->smac_15_0);
+ spec->smac_47_16 = 0;
+ spec->smac_15_0 = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
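
The ip_version branch above is the only failure path in this tag builder: only versions 4 and 6 map to hardware l3_type codes, anything else yields -EINVAL. The mapping as a standalone helper, using the values from the defines at the top of dr_ste.h:

    /* Map the IP header version nibble to the STE l3_type code */
    static int ip_version_to_l3_type(unsigned int ip_version)
    {
            switch (ip_version) {
            case 4:                 /* IP_VERSION_IPV4 */
                    return 0x1;     /* STE_IPV4 */
            case 6:                 /* IP_VERSION_IPV6 */
                    return 0x2;     /* STE_IPV6 */
            default:
                    return -1;      /* callers turn this into -EINVAL */
            }
    }
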
+
+static void
+dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
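+ /* TCP and UDP share the same port fields in this lookup type; only one
+ * of the two pairs is expected to be non-zero in a given spec.
+ */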
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
+}
+
+static int
+dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS(mpls, misc2, inner, tag);
+ else
+ DR_STE_SET_MPLS(mpls, misc2, outer, tag);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+ DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+ DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_udp_ttl);
+ }
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
+}
+
+#define ICMP_TYPE_OFFSET_FIRST_DW 24
+#define ICMP_CODE_OFFSET_FIRST_DW 16
+
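+/* The ICMP type occupies bits 31:24 and the code bits 23:16 of the first
+ * flex parser dword; the second dword carries the ICMP header data.
+ */
+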
+static int
+dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+ u32 *icmp_header_data;
+ int dw0_location;
+ int dw1_location;
+ u8 *icmp_type;
+ u8 *icmp_code;
+ bool is_ipv4;
+
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
+ if (is_ipv4) {
+ icmp_header_data = &misc_3->icmpv4_header_data;
+ icmp_type = &misc_3->icmpv4_type;
+ icmp_code = &misc_3->icmpv4_code;
+ dw0_location = sb->caps->flex_parser_id_icmp_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data = &misc_3->icmpv6_header_data;
+ icmp_type = &misc_3->icmpv6_type;
+ icmp_code = &misc_3->icmpv6_code;
+ dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
+ }
+
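+ /* Only the default flex parser allocations (dw0 at parser 4, dw1 at
+ * parser 5) are supported here; anything else is rejected.
+ */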
+ switch (dw0_location) {
+ case 4:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
+ (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
+
+ *icmp_type = 0;
+ *icmp_code = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
+ *icmp_header_data);
+ *icmp_header_data = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ int ret;
+
+ ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
+
+ return 0;
+}
+
+static int
+dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc_2, metadata_reg_a);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
+}
+
+static int
+dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
+}
+
+static int
+dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
+}
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+ misc_mask->source_eswitch_owner_vhca_id = 0;
+}
+
+static int
+dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *dmn = sb->dmn;
+ struct mlx5dr_cmd_caps *caps;
+ u8 *bit_mask = sb->bit_mask;
+ bool source_gvmi_set;
+
+ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+ if (sb->vhca_id_valid) {
+ /* Find port GVMI based on the eswitch_owner_vhca_id */
+ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ caps = &dmn->info.caps;
+ else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+ dmn->peer_dmn->info.caps.gvmi))
+ caps = &dmn->peer_dmn->info.caps;
+ else
+ return -EINVAL;
+
+ misc->source_eswitch_owner_vhca_id = 0;
+ } else {
+ caps = &dmn->info.caps;
+ }
+
+ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
+ if (source_gvmi_set) {
+ vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ if (!vport_cap) {
+ mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ misc->source_port);
+ return -EINVAL;
+ }
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
+}
+
+struct mlx5dr_ste_ctx ste_ctx_v0 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v0_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v0_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v0_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v0_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v0_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v0_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v0_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
+ .build_icmp_init = &dr_ste_v0_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
+ .build_register_0_init = &dr_ste_v0_build_register_0_init,
+ .build_register_1_init = &dr_ste_v0_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v0_init,
+ .set_next_lu_type = &dr_ste_v0_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v0_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v0_set_miss_addr,
+ .get_miss_addr = &dr_ste_v0_get_miss_addr,
+ .set_hit_addr = &dr_ste_v0_set_hit_addr,
+ .set_byte_mask = &dr_ste_v0_set_byte_mask,
+ .get_byte_mask = &dr_ste_v0_get_byte_mask,
+
+ /* Actions */
+ .set_actions_rx = &dr_ste_v0_set_actions_rx,
+ .set_actions_tx = &dr_ste_v0_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v0_action_modify_field_arr,
+ .set_action_set = &dr_ste_v0_set_action_set,
+ .set_action_add = &dr_ste_v0_set_action_add,
+ .set_action_copy = &dr_ste_v0_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
+};
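The builder table above is consumed through a small per-version dispatcher. A minimal sketch of what mlx5dr_ste_get_ctx(), declared in dr_types.h below, is expected to look like; the array layout and the version value 0 for this v0 format are assumptions for illustration, not part of this patch:

static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[0] = &ste_ctx_v0,	/* assumed: HW steering format version 0 */
};

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	/* Unknown/unsupported STE formats get no context */
	if (version >= ARRAY_SIZE(mlx5dr_ste_ctx_arr))
		return NULL;

	return mlx5dr_ste_ctx_arr[version];
}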
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 51880df26724..8d2c3b6e2755 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -120,6 +120,7 @@ struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_matcher_rx_tx;
+struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
u8 *hw_ste;
@@ -154,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
};
struct mlx5dr_ste_htbl {
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
@@ -190,7 +191,7 @@ struct mlx5dr_ste_build {
u8 vhca_id_valid:1;
struct mlx5dr_domain *dmn;
struct mlx5dr_cmd_caps *caps;
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
@@ -201,7 +202,7 @@ struct mlx5dr_ste_build {
struct mlx5dr_ste_htbl *
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask);
+ u16 lu_type, u16 byte_mask);
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
@@ -219,35 +220,84 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
-void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
- struct mlx5dr_ste_htbl *next_htbl);
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 miss_addr);
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size);
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
- int size, bool encap_l3);
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
- bool go_back);
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index);
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
+#define MLX5DR_MAX_VLANS 2
+
+struct mlx5dr_ste_actions_attr {
+ u32 modify_index;
+ u16 modify_actions;
+ u32 decap_index;
+ u16 decap_actions;
+ u8 decap_with_vlan:1;
+ u64 final_icm_addr;
+ u32 flow_tag;
+ u32 ctr_id;
+ u16 gvmi;
+ u16 hit_gvmi;
+ u32 reformat_id;
+ u32 reformat_size;
+ struct {
+ int count;
+ u32 headers[MLX5DR_MAX_VLANS];
+ } vlans;
+};
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher);
@@ -271,8 +321,6 @@ static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
return !ste->refcount;
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
- struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -289,65 +337,85 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
@@ -574,10 +642,10 @@ struct mlx5dr_match_misc3 {
u32 outer_vxlan_gpe_next_protocol:8;
u32 icmpv4_header_data;
u32 icmpv6_header_data;
- u32 icmpv6_code:8;
- u32 icmpv6_type:8;
- u32 icmpv4_code:8;
- u32 icmpv4_type:8;
+ u8 icmpv6_code;
+ u8 icmpv6_type;
+ u8 icmpv4_code;
+ u8 icmpv4_type;
u8 reserved_auto3[0x1c];
};
@@ -671,6 +739,7 @@ struct mlx5dr_domain {
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct mlx5dr_domain_cache cache;
+ struct mlx5dr_ste_ctx *ste_ctx;
};
struct mlx5dr_table_rx_tx {
@@ -725,6 +794,14 @@ struct mlx5dr_rule_member {
struct list_head use_ste_list;
};
+struct mlx5dr_ste_action_modify_field {
+ u16 hw_field;
+ u8 start;
+ u8 end;
+ u8 l3_type;
+ u8 l4_type;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -1000,7 +1077,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_htbl_connect_info *connect_info,
bool update_hw_ste);
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index e01c3766c7de..83df6df6b459 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -5,91 +5,6 @@
#define MLX5_IFC_DR_H
enum {
- MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
- MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
- MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
- MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
- MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
- MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
-};
-
-enum {
- MLX5DR_STE_LU_TYPE_NOP = 0x00,
- MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
- MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
- MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
- MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
- MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
- MLX5DR_STE_LU_TYPE_GRE = 0x16,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
- MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bdafc85fd874..ba78e0660523 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
+#include "sf/sf.h"
/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);
@@ -1160,6 +1161,6 @@ EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
*/
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
- return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
+ return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 5d9ddf36fb4e..e6f677e42007 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -267,7 +267,7 @@ struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw)
const void *first_tlv_ptr;
const void *cb_top_ptr;
- mfa2_file = kcalloc(1, sizeof(*mfa2_file), GFP_KERNEL);
+ mfa2_file = kzalloc(sizeof(*mfa2_file), GFP_KERNEL);
if (!mfa2_file)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 685037e052af..52fdc34251ba 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -84,6 +84,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ atomic_t active_ports_count;
bool fw_flash_in_progress;
struct {
struct devlink_health_reporter *fw_fatal;
@@ -96,8 +97,36 @@ struct mlxsw_core {
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
-static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
+static u64 mlxsw_ports_occ_get(void *priv)
{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ return atomic_read(&mlxsw_core->active_ports_count);
+}
+
+static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params ports_num_params;
+ u32 max_ports;
+
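+ /* Local port 0 is the CPU port; exclude it from the user-visible
+ * resource size.
+ */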
+ max_ports = mlxsw_core->max_ports - 1;
+ devlink_resource_size_params_init(&ports_num_params, max_ports,
+ max_ports, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
+}
+
+static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ int err;
+
/* Switch ports are numbered from 1 to queried value */
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
@@ -110,11 +139,30 @@ static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
if (!mlxsw_core->ports)
return -ENOMEM;
+ if (!reload) {
+ err = mlxsw_core_resources_ports_register(mlxsw_core);
+ if (err)
+ goto err_resources_ports_register;
+ }
+ atomic_set(&mlxsw_core->active_ports_count, 0);
+ devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
+
return 0;
+
+err_resources_ports_register:
+ kfree(mlxsw_core->ports);
+ return err;
}
-static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
+static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+
+ devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ if (!reload)
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+
kfree(mlxsw_core->ports);
}
@@ -1897,7 +1945,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_register_resources;
}
- err = mlxsw_ports_init(mlxsw_core);
+ err = mlxsw_ports_init(mlxsw_core, reload);
if (err)
goto err_ports_init;
@@ -1986,7 +2034,7 @@ err_devlink_register:
err_emad_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
- mlxsw_ports_fini(mlxsw_core);
+ mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
devlink_resources_unregister(devlink, NULL);
@@ -2056,7 +2104,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
- mlxsw_ports_fini(mlxsw_core);
+ mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -2755,16 +2803,25 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
const unsigned char *switch_id,
unsigned char switch_id_len)
{
- return __mlxsw_core_port_init(mlxsw_core, local_port,
- DEVLINK_PORT_FLAVOUR_PHYSICAL,
- port_number, split, split_port_subnumber,
- splittable, lanes,
- switch_id, switch_id_len);
+ int err;
+
+ err = __mlxsw_core_port_init(mlxsw_core, local_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber,
+ splittable, lanes,
+ switch_id, switch_id_len);
+ if (err)
+ return err;
+
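+ /* Count the port towards the devlink "ports" resource occupancy. */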
+ atomic_inc(&mlxsw_core->active_ports_count);
+ return 0;
}
EXPORT_SYMBOL(mlxsw_core_port_init);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
+ atomic_dec(&mlxsw_core->active_ports_count);
+
__mlxsw_core_port_fini(mlxsw_core, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6b3ccbf6b238..8af7d9d03475 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -19,6 +19,11 @@
#include "cmd.h"
#include "resources.h"
+enum mlxsw_core_resource_id {
+ MLXSW_CORE_RESOURCE_PORTS = 1,
+ MLXSW_CORE_RESOURCE_MAX,
+};
+
struct mlxsw_core;
struct mlxsw_core_port;
struct mlxsw_driver;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 8fa286ccdd6b..bf85ce9835d7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -19,7 +19,7 @@
#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */
+#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
@@ -176,6 +176,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
+ if (crit_temp > emerg_temp) {
+ dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
+ tz->tzdev->type, crit_temp, emerg_temp);
+ return 0;
+ }
+
/* According to the system thermal requirements, the thermal zones are
* defined with four trip points. The critical and emergency
* temperature thresholds, provided by QSFP module are set as "active"
@@ -190,11 +196,8 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
- if (emerg_temp > crit_temp)
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
MLXSW_THERMAL_MODULE_TEMP_SHIFT;
- else
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4eeae8d78006..d0052537e627 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -323,8 +323,8 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
struct pci_dev *pdev = mlxsw_pci->pdev;
dma_addr_t mapaddr;
- mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
- if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
@@ -342,7 +342,7 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
if (!frag_len)
return;
- pci_unmap_single(pdev, mapaddr, frag_len, direction);
+ dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
@@ -858,9 +858,9 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
tasklet_setup(&q->tasklet, q_ops->tasklet);
mem_item->size = MLXSW_PCI_AQ_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size, &mem_item->mapaddr,
+ GFP_KERNEL);
if (!mem_item->buf)
return -ENOMEM;
@@ -890,8 +890,8 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err_q_ops_init:
kfree(q->elem_info);
err_elem_info_alloc:
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
return err;
}
@@ -903,8 +903,8 @@ static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
q_ops->fini(mlxsw_pci, q);
kfree(q->elem_info);
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1273,9 +1273,9 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
mem_item = &mlxsw_pci->fw_area.items[i];
mem_item->size = MLXSW_PCI_PAGE_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size,
+ &mem_item->mapaddr, GFP_KERNEL);
if (!mem_item->buf) {
err = -ENOMEM;
goto err_alloc;
@@ -1304,8 +1304,8 @@ err_alloc:
for (i--; i >= 0; i--) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
return err;
@@ -1321,8 +1321,8 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
}
@@ -1347,8 +1347,8 @@ static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
int err = 0;
mbox->size = MLXSW_CMD_MBOX_SIZE;
- mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
- &mbox->mapaddr);
+ mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr, GFP_KERNEL);
if (!mbox->buf) {
dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
err = -ENOMEM;
@@ -1362,8 +1362,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
{
struct pci_dev *pdev = mlxsw_pci->pdev;
- pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
- mbox->mapaddr);
+ dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
}
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
@@ -1848,17 +1848,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_pci_set_dma_mask;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a6956cfc9cb1..a3769f95a182 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -52,7 +52,7 @@
#define MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF "rif"
enum mlxsw_sp_resource_id {
- MLXSW_SP_RESOURCE_KVD = 1,
+ MLXSW_SP_RESOURCE_KVD = MLXSW_CORE_RESOURCE_MAX,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 41424ee909a0..0ac7014703aa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4309,11 +4309,18 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
if (event != NEXTHOP_EVENT_REPLACE)
return 0;
- if (!info->is_grp)
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
info->extack);
- return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp,
- info->extack);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
+ info->nh_grp,
+ info->extack);
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
+ return -EOPNOTSUPP;
+ }
}
static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
@@ -4321,13 +4328,17 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
{
const struct net_device *dev;
- if (info->is_grp)
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ dev = info->nh->dev;
+ return info->nh->gw_family || info->nh->is_reject ||
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
/* Already validated earlier. */
return true;
-
- dev = info->nh->dev;
- return info->nh->gw_family || info->nh->is_reject ||
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+ default:
+ return false;
+ }
}
static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
@@ -4410,11 +4421,22 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct nh_notifier_info *info)
{
- unsigned int nhs = info->is_grp ? info->nh_grp->num_nh : 1;
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
+ unsigned int nhs;
int err, i;
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ nhs = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nhs = info->nh_grp->num_nh;
+ break;
+ default:
+ return -EINVAL;
+ }
+
nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
if (!nhgi)
return -ENOMEM;
@@ -4427,12 +4449,18 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
int weight;
nh = &nhgi->nexthops[i];
- if (info->is_grp) {
- nh_obj = &info->nh_grp->nh_entries[i].nh;
- weight = info->nh_grp->nh_entries[i].weight;
- } else {
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
nh_obj = info->nh;
weight = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nh_obj = &info->nh_grp->nh_entries[i].nh;
+ weight = info->nh_grp->nh_entries[i].weight;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_nexthop_obj_init;
}
err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
weight);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index c6c5826aba41..1892cea05ee7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp1_span_cpu_can_handle,
.parms_set = mlxsw_sp1_span_entry_cpu_parms,
.configure = mlxsw_sp1_span_entry_cpu_configure,
@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .is_static = true,
.can_handle = mlxsw_sp_port_dev_check,
.parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp2_span_cpu_can_handle,
.parms_set = mlxsw_sp2_span_entry_cpu_parms,
.configure = mlxsw_sp2_span_entry_cpu_configure,
@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
if (!refcount_read(&curr->ref_count))
continue;
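+ /* Parameters of static entry types never change, so skip them. */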
+ if (curr->ops->is_static)
+ continue;
+
err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
if (err)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index d907718bc8c5..aa1cd409c0e2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry {
};
struct mlxsw_sp_span_entry_ops {
+ bool is_static;
bool (*can_handle)(const struct net_device *to_dev);
int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index cea42f6ed89b..20c4f3c2cf23 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -527,7 +527,6 @@ mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u8 state)
{
@@ -535,9 +534,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -659,7 +655,6 @@ err_port_bridge_vlan_learning_set:
static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
*mlxsw_sp_port,
- struct switchdev_trans *trans,
unsigned long brport_flags)
{
if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
@@ -669,16 +664,12 @@ static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
unsigned long brport_flags)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
@@ -724,35 +715,26 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
}
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
- ageing_time > MLXSW_SP_MAX_AGEING_TIME)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -765,16 +747,12 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u16 vlan_proto)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -784,16 +762,12 @@ static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_p
}
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_port_mrouter)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
@@ -825,7 +799,6 @@ static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool mc_disabled)
{
@@ -834,9 +807,6 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -896,16 +866,12 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_mrouter)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -921,54 +887,52 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
- trans,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
- err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_protocol);
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
- err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
- err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mc_disabled);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
- err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
@@ -977,8 +941,7 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
}
- if (switchdev_trans_ph_commit(trans))
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
return err;
}
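The hunks above drop the prepare/commit transactional model that switchdev used to impose: every attribute handler previously ran twice, once to validate (prepare) and once to apply (commit). A minimal sketch of the two shapes, with all demo_* names hypothetical and demo_trans mirroring the old struct switchdev_trans:

struct demo_trans { bool ph_prepare; };
struct demo_port { int min, max, cur; };

static int demo_validate(struct demo_port *p, int val)
{
	return (val < p->min || val > p->max) ? -ERANGE : 0;
}

/* Old model: called twice, behavior keyed off the transaction phase. */
static int demo_attr_set_old(struct demo_port *p, struct demo_trans *trans,
			     int val)
{
	if (trans->ph_prepare)
		return demo_validate(p, val);	/* phase 1: may fail */
	p->cur = val;				/* phase 2: must not fail */
	return 0;
}

/* New model: called once, validate then apply in the same pass. */
static int demo_attr_set_new(struct demo_port *p, int val)
{
	int err = demo_validate(p, val);

	if (err)
		return err;
	p->cur = val;
	return 0;
}

This is why mlxsw_sp_port_attr_br_ageing_set() above now performs its -ERANGE check unconditionally instead of only in the prepare phase.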
@@ -1211,23 +1174,20 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
const struct switchdev_obj_port_vlan *vlan)
{
u16 pvid;
- u16 vid;
pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
if (!pvid)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- if (vid != pvid) {
- netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
- return -EBUSY;
- }
- } else {
- if (vid == pvid) {
- netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
- return -EBUSY;
- }
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vlan->vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vlan->vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
}
}
@@ -1236,7 +1196,6 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1244,14 +1203,12 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev) &&
- switchdev_trans_ph_prepare(trans))
+ br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
@@ -1259,9 +1216,6 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1269,17 +1223,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
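The vid_begin/vid_end loops disappear from mlxsw_sp_port_vlans_add() and friends because the bridge now emits one SWITCHDEV_OBJ_ID_PORT_VLAN notification per VID rather than per range. A sketch of the before/after shape, with hypothetical demo_* names standing in for the real object layouts:

static int demo_add_vid(u16 vid);

struct demo_vlan_range { u16 vid_begin, vid_end; };	/* old object layout */
struct demo_vlan { u16 vid; };				/* new object layout */

static int demo_vlans_add_old(const struct demo_vlan_range *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = demo_add_vid(vid);
		if (err)
			return err;
	}
	return 0;
}

static int demo_vlans_add_new(const struct demo_vlan *vlan)
{
	return demo_add_vid(vlan->vid);	/* exactly one VID per object */
}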
@@ -1716,8 +1662,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -1729,9 +1674,6 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index;
int err = 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
return 0;
@@ -1813,7 +1755,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -1823,22 +1764,19 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
- extack);
- if (switchdev_trans_ph_prepare(trans)) {
- /* The event is emitted before the changes are actually
- * applied to the bridge. Therefore schedule the respin
- * call for later, so that the respin logic sees the
- * updated bridge state.
- */
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
- }
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
+
+ /* The event is emitted before the changes are actually
+ * applied to the bridge. Therefore schedule the respin
+ * call for later, so that the respin logic sees the
+ * updated bridge state.
+ */
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1873,7 +1811,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev))
return -EOPNOTSUPP;
@@ -1885,8 +1822,7 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
return 0;
}
@@ -3406,12 +3342,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct switchdev_trans *trans = port_obj_info->trans;
struct mlxsw_sp_bridge_device *bridge_device;
struct netlink_ext_ack *extack;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
br_dev = netdev_master_upper_dev_get(vxlan_dev);
@@ -3424,9 +3358,6 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
port_obj_info->handled = true;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
return -EINVAL;
@@ -3434,18 +3365,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
- vxlan_dev, vid,
- flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
+ vxlan_dev, vlan->vid,
+ flag_untagged,
+ flag_pvid, extack);
}
static void
@@ -3458,7 +3381,6 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
br_dev = netdev_master_upper_dev_get(vxlan_dev);
if (!br_dev)
@@ -3477,9 +3399,8 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
- vxlan_dev, vid);
+ mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
+ vlan->vid);
}
static int
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 42bc014136fe..93df3049cdc0 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -31,6 +31,8 @@ config KS8851
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
SPI driver for Micrel KS8851 SPI attached network chip.
@@ -40,6 +42,8 @@ config KS8851_MLL
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
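Both selects line up with the MDIO bus registration added to ks8851_common.c further down: once the driver attaches its internal PHY through phylib, the matching Micrel PHY driver has to be available, so PHYLIB and MICREL_PHY are pulled in for both bus variants.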
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 2b319e451121..e2eb0caeac82 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -358,6 +358,7 @@ union ks8851_tx_hdr {
* @vdd_reg: Optional regulator supplying the chip
* @vdd_io: Optional digital power supply for IO
* @gpio: Optional reset_n gpio
+ * @mii_bus: Pointer to MII bus structure
* @lock: Bus access lock callback
* @unlock: Bus access unlock callback
* @rdreg16: 16bit register read callback
@@ -403,6 +404,7 @@ struct ks8851_net {
struct regulator *vdd_reg;
struct regulator *vdd_io;
int gpio;
+ struct mii_bus *mii_bus;
void (*lock)(struct ks8851_net *ks,
unsigned long *flags);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 6fc7483aea03..2feed6ce19d3 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -23,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "ks8851.h"
@@ -932,7 +931,25 @@ static int ks8851_phy_reg(int reg)
return KS_P1ANLPR;
}
- return 0x0;
+ return -EOPNOTSUPP;
+}
+
+static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ int result;
+ int ksreg;
+
+ ksreg = ks8851_phy_reg(reg);
+ if (ksreg < 0)
+ return ksreg;
+
+ ks8851_lock(ks, &flags);
+ result = ks8851_rdreg16(ks, ksreg);
+ ks8851_unlock(ks, &flags);
+
+ return result;
}
/**
@@ -952,20 +969,13 @@ static int ks8851_phy_reg(int reg)
*/
static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
{
- struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
- int ksreg;
- int result;
+ int ret;
- ksreg = ks8851_phy_reg(reg);
- if (!ksreg)
+ ret = ks8851_phy_read_common(dev, phy_addr, reg);
+ if (ret < 0)
return 0x0; /* no error return allowed, so use zero */
- ks8851_lock(ks, &flags);
- result = ks8851_rdreg16(ks, ksreg);
- ks8851_unlock(ks, &flags);
-
- return result;
+ return ret;
}
static void ks8851_phy_write(struct net_device *dev,
@@ -976,13 +986,37 @@ static void ks8851_phy_write(struct net_device *dev,
int ksreg;
ksreg = ks8851_phy_reg(reg);
- if (ksreg) {
+ if (ksreg >= 0) {
ks8851_lock(ks, &flags);
ks8851_wrreg16(ks, ksreg, value);
ks8851_unlock(ks, &flags);
}
}
+static int ks8851_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ if (phy_id != 0)
+ return -EOPNOTSUPP;
+
+ /* KS8851 PHY ID registers are swapped in HW; swap them back. */
+ if (reg == MII_PHYSID1)
+ reg = MII_PHYSID2;
+ else if (reg == MII_PHYSID2)
+ reg = MII_PHYSID1;
+
+ return ks8851_phy_read_common(ks->netdev, phy_id, reg);
+}
+
+static int ks8851_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ ks8851_phy_write(ks->netdev, phy_id, reg, val);
+ return 0;
+}
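The PHY ID swap above matters because phylib composes the 32-bit PHY ID from the two MII_PHYSID registers and uses it for driver matching; reading the KS8851's reversed registers through the swap yields a well-formed ID. A sketch of the composition, for illustration only:

static u32 demo_compose_phy_id(u16 physid1, u16 physid2)
{
	/* phylib builds the ID with PHYSID1 in the high half and
	 * PHYSID2 in the low half.
	 */
	return ((u32)physid1 << 16) | physid2;
}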
+
/**
* ks8851_read_selftest - read the selftest memory info.
* @ks: The device state
@@ -1046,6 +1080,42 @@ int ks8851_resume(struct device *dev)
}
#endif
+static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "ks8851_eth_mii";
+ mii_bus->read = ks8851_mdio_read;
+ mii_bus->write = ks8851_mdio_write;
+ mii_bus->priv = ks;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)BIT(0));
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = mdiobus_register(mii_bus);
+ if (ret)
+ goto err_mdiobus_register;
+
+ ks->mii_bus = mii_bus;
+
+ return 0;
+
+err_mdiobus_register:
+ mdiobus_free(mii_bus);
+ return ret;
+}
+
+static void ks8851_unregister_mdiobus(struct ks8851_net *ks)
+{
+ mdiobus_unregister(ks->mii_bus);
+ mdiobus_free(ks->mii_bus);
+}
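Note the strict pairing: mdiobus_alloc()/mdiobus_register() on the way in, mdiobus_unregister()/mdiobus_free() on the way out, and the probe error path below unwinds in reverse order of acquisition. A generic sketch of that unwind pattern, with all demo_* names hypothetical:

static int demo_acquire_a(void);
static int demo_acquire_b(void);
static int demo_acquire_c(void);
static void demo_release_a(void);
static void demo_release_b(void);

static int demo_probe(void)
{
	int err;

	err = demo_acquire_a();
	if (err)
		return err;

	err = demo_acquire_b();			/* e.g. the MDIO bus */
	if (err)
		goto err_a;

	err = demo_acquire_c();			/* e.g. register_netdev() */
	if (err)
		goto err_b;

	return 0;

err_b:
	demo_release_b();
err_a:
	demo_release_a();
	return err;
}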
+
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en)
{
@@ -1104,6 +1174,8 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
+ SET_NETDEV_DEV(netdev, dev);
+
/* setup EEPROM state */
ks->eeprom.data = ks;
ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
@@ -1120,6 +1192,10 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
dev_info(dev, "message enable is %d\n", msg_en);
+ ret = ks8851_register_mdiobus(ks, dev);
+ if (ret)
+ goto err_mdio;
+
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1128,7 +1204,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
skb_queue_head_init(&ks->txq);
netdev->ethtool_ops = &ks8851_ethtool_ops;
- SET_NETDEV_DEV(netdev, dev);
dev_set_drvdata(dev, ks);
@@ -1156,7 +1231,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "failed to register network device\n");
- goto err_netdev;
+ goto err_id;
}
netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
@@ -1165,8 +1240,9 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
return 0;
-err_netdev:
err_id:
+ ks8851_unregister_mdiobus(ks);
+err_mdio:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
regulator_disable(ks->vdd_reg);
@@ -1180,6 +1256,8 @@ int ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
+ ks8851_unregister_mdiobus(priv);
+
if (netif_msg_drv(priv))
dev_info(dev, "remove\n");
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 3bab0cb2b1a5..2e8fcce50f9d 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 4ec7f1615977..479406ecbaa3 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 3804310c853a..51359ce650bd 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1253,7 +1253,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx,
if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
goto done;
- descriptor_type = (descriptor->data0) &
+ descriptor_type = le32_to_cpu(descriptor->data0) &
TX_DESC_DATA0_DTYPE_MASK_;
if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
goto clean_up_data_descriptor;
@@ -1313,7 +1313,7 @@ static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
{
- while ((*tx->head_cpu_ptr) != (tx->last_head)) {
+ while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
lan743x_tx_release_desc(tx, tx->last_head, false);
tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
}
@@ -1399,10 +1399,10 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
if (dma_mapping_error(dev, dma_ptr))
return -ENOMEM;
- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
- tx_descriptor->data3 = (frame_length << 16) &
- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
buffer_info->skb = NULL;
buffer_info->dma_ptr = dma_ptr;
@@ -1443,7 +1443,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
/* move to next descriptor */
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
@@ -1487,7 +1487,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
/* wrap up previous descriptor */
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
/* move to next descriptor */
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
@@ -1513,10 +1513,10 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
return -ENOMEM;
}
- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
- tx_descriptor->data3 = (frame_length << 16) &
- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
buffer_info->skb = NULL;
buffer_info->dma_ptr = dma_ptr;
@@ -1560,7 +1560,7 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
@@ -1967,11 +1967,11 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
}
buffer_info->buffer_length = length;
- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
descriptor->data3 = 0;
- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
- (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
+ (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
lan743x_rx_update_tail(rx, index);
@@ -1986,12 +1986,12 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
descriptor->data3 = 0;
- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
((buffer_info->buffer_length) &
- RX_DESC_DATA0_BUF_LENGTH_MASK_));
+ RX_DESC_DATA0_BUF_LENGTH_MASK_)));
lan743x_rx_update_tail(rx, index);
}
@@ -2025,7 +2025,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
{
struct skb_shared_hwtstamps *hwtstamps = NULL;
int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
- int current_head_index = *rx->head_cpu_ptr;
+ int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
struct lan743x_rx_buffer_info *buffer_info;
struct lan743x_rx_descriptor *descriptor;
int extension_index = -1;
@@ -2040,14 +2040,14 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
if (rx->last_head != current_head_index) {
descriptor = &rx->ring_cpu_ptr[rx->last_head];
- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
goto done;
- if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
+ if (!(le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_))
goto done;
first_index = rx->last_head;
- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_) {
last_index = rx->last_head;
} else {
int index;
@@ -2055,10 +2055,10 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
index = lan743x_rx_next_index(rx, first_index);
while (index != current_head_index) {
descriptor = &rx->ring_cpu_ptr[index];
- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
goto done;
- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_) {
last_index = index;
break;
}
@@ -2067,17 +2067,17 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
}
if (last_index >= 0) {
descriptor = &rx->ring_cpu_ptr[last_index];
- if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
/* extension is expected to follow */
int index = lan743x_rx_next_index(rx,
last_index);
if (index != current_head_index) {
descriptor = &rx->ring_cpu_ptr[index];
- if (descriptor->data0 &
+ if (le32_to_cpu(descriptor->data0) &
RX_DESC_DATA0_OWN_) {
goto done;
}
- if (descriptor->data0 &
+ if (le32_to_cpu(descriptor->data0) &
RX_DESC_DATA0_EXT_) {
extension_index = index;
} else {
@@ -2129,7 +2129,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
}
buffer_info->skb = NULL;
packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_
- (descriptor->data0);
+ (le32_to_cpu(descriptor->data0));
skb_put(skb, packet_length - 4);
skb->protocol = eth_type_trans(skb,
rx->adapter->netdev);
@@ -2167,8 +2167,8 @@ process_extension:
descriptor = &rx->ring_cpu_ptr[extension_index];
buffer_info = &rx->buffer_info[extension_index];
- ts_sec = descriptor->data1;
- ts_nsec = (descriptor->data2 &
+ ts_sec = le32_to_cpu(descriptor->data1);
+ ts_nsec = (le32_to_cpu(descriptor->data2) &
RX_DESC_DATA2_TS_NS_MASK_);
lan743x_rx_reuse_ring_element(rx, extension_index);
real_last_index = extension_index;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 404af3f4635e..f3f778910fcc 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -661,7 +661,7 @@ struct lan743x_tx {
struct lan743x_tx_buffer_info *buffer_info;
- u32 *head_cpu_ptr;
+ __le32 *head_cpu_ptr;
dma_addr_t head_dma_ptr;
int last_head;
int last_tail;
@@ -691,7 +691,7 @@ struct lan743x_rx {
struct lan743x_rx_buffer_info *buffer_info;
- u32 *head_cpu_ptr;
+ __le32 *head_cpu_ptr;
dma_addr_t head_dma_ptr;
u32 last_head;
u32 last_tail;
@@ -775,10 +775,10 @@ struct lan743x_adapter {
#define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000)
struct lan743x_tx_descriptor {
- u32 data0;
- u32 data1;
- u32 data2;
- u32 data3;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
@@ -813,10 +813,10 @@ struct lan743x_tx_buffer_info {
#define RX_HEAD_PADDING NET_IP_ALIGN
struct lan743x_rx_descriptor {
- u32 data0;
- u32 data1;
- u32 data2;
- u32 data3;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
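The whole lan743x hunk set is a single mechanical conversion: descriptor fields shared with the DMA engine are little-endian, so they are retyped __le32 and every access goes through cpu_to_le32()/le32_to_cpu(). On little-endian hosts these helpers compile away; on big-endian hosts they byte-swap, and the __le32 typing lets sparse flag any access that forgets the conversion. A minimal sketch of the enforced accessor pattern:

struct demo_desc {
	__le32 data0;	/* device (little-endian) byte order */
};

static u32 demo_desc_read(const struct demo_desc *d)
{
	return le32_to_cpu(d->data0);	/* device order -> CPU order */
}

static void demo_desc_write(struct demo_desc *d, u32 val)
{
	d->data0 = cpu_to_le32(val);	/* CPU order -> device order */
}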
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 58f94c3d80f9..346bba2730ad 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -6,7 +6,8 @@ mscc_ocelot_switch_lib-y := \
ocelot_police.o \
ocelot_vcap.o \
ocelot_flower.o \
- ocelot_ptp.o
+ ocelot_ptp.o \
+ ocelot_devlink.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_vsc7514.o \
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0b9992bd6626..5b2c0cea49ea 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type)
{
+ u32 cmd = ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+ unsigned int mc_ports;
+
+ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+ if (type == ENTRYTYPE_MACv4)
+ mc_ports = (mac[1] << 8) | mac[2];
+ else if (type == ENTRYTYPE_MACv6)
+ mc_ports = (mac[0] << 8) | mac[1];
+ else
+ mc_ports = 0;
+
+ if (mc_ports & BIT(ocelot->num_phys_ports))
+ cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
- ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
- ANA_TABLES_MACACCESS_DEST_IDX(port) |
- ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
- ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
- ANA_TABLES_MACACCESS);
+ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
return ocelot_mact_wait_for_completion(ocelot);
}
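For MACv4/MACv6 entries the "address" bytes double as a destination port mask, which is what the new mc_ports computation decodes before testing the CPU port bit. A worked illustration (values made up): with mac[1] = 0x02 and mac[2] = 0x40, the MACv4 mask is 0x0240, i.e. ports 6 and 9 are destinations.

static bool demo_macv4_targets_port(const unsigned char mac[6], int port)
{
	unsigned int mc_ports = (mac[1] << 8) | mac[2];	/* decode the mask */

	return mc_ports & BIT(port);
}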
@@ -208,25 +221,20 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware, struct switchdev_trans *trans)
+ bool vlan_aware)
{
+ struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_vcap_filter *filter;
u32 val;
- if (switchdev_trans_ph_prepare(trans)) {
- struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
- struct ocelot_vcap_filter *filter;
-
- list_for_each_entry(filter, &block->rules, list) {
- if (filter->ingress_port_mask & BIT(port) &&
- filter->action.vid_replace_ena) {
- dev_err(ocelot->dev,
- "Cannot change VLAN state with vlan modify rules active\n");
- return -EBUSY;
- }
+ list_for_each_entry(filter, &block->rules, list) {
+ if (filter->ingress_port_mask & BIT(port) &&
+ filter->action.vid_replace_ena) {
+ dev_err(ocelot->dev,
+ "Cannot change VLAN state with vlan modify rules active\n");
+ return -EBUSY;
}
-
- return 0;
}
ocelot_port->vlan_aware = vlan_aware;
@@ -1179,7 +1187,6 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_vlan pvid = {0}, native_vlan = {0};
- struct switchdev_trans trans;
int ret;
ocelot->bridge_mask &= ~BIT(port);
@@ -1187,13 +1194,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- trans.ph_prepare = true;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
- if (ret)
- return ret;
-
- trans.ph_prepare = false;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ ret = ocelot_port_vlan_filtering(ocelot, port, false);
if (ret)
return ret;
@@ -1366,7 +1367,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
pause_stop);
/* Tail dropping watermarks */
- atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) /
+ atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
OCELOT_BUFFER_CELL_SZ;
atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
@@ -1479,6 +1480,21 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
ANA_PORT_VLAN_CFG, cpu);
}
+static void ocelot_detect_features(struct ocelot *ocelot)
+{
+ int mmgt, eq_ctrl;
+
+ /* For Ocelot, Felix, Seville, Serval etc., SYS:MMGT:MMGT:FREECNT holds
+ * the number of 240-byte free memory words (aka 4-cell chunks) and not
+ * 192 bytes as the documentation incorrectly says.
+ */
+ mmgt = ocelot_read(ocelot, SYS_MMGT);
+ ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
+
+ eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
+ ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
+}
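For example, a FREECNT readout of 1024 would mean 1024 * 240 = 245760 bytes of usable packet buffer, a figure that would be underreported by a fifth if the documented 192-byte word size were used.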
+
int ocelot_init(struct ocelot *ocelot)
{
char queue_name[32];
@@ -1521,6 +1537,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 291d39d49c4e..e8621dbc14f7 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -121,13 +121,15 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct phy_device *phy);
-
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
+int ocelot_devlink_init(struct ocelot *ocelot);
+void ocelot_devlink_teardown(struct ocelot *ocelot);
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour);
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
+extern const struct devlink_ops ocelot_devlink_ops;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
new file mode 100644
index 000000000000..edafbd37d12c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2020-2021 NXP Semiconductors
+ */
+#include <net/devlink.h>
+#include "ocelot.h"
+
+/* The queue system tracks four resource consumptions:
+ * Resource 0: Memory tracked per source port
+ * Resource 1: Frame references tracked per source port
+ * Resource 2: Memory tracked per destination port
+ * Resource 3: Frame references tracked per destination port
+ */
+#define OCELOT_RESOURCE_SZ 256
+#define OCELOT_NUM_RESOURCES 4
+
+#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ)
+#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ)
+
+/* For each resource type there are 4 types of watermarks:
+ * Q_RSRV: reservation per QoS class per port
+ * PRIO_SHR: sharing watermark per QoS class across all ports
+ * P_RSRV: reservation per port
+ * COL_SHR: sharing watermark per color (drop precedence) across all ports
+ */
+#define xxx_Q_RSRV_x 0
+#define xxx_PRIO_SHR_x 216
+#define xxx_P_RSRV_x 224
+#define xxx_COL_SHR_x 254
+
+/* Reservation Watermarks
+ * ----------------------
+ *
+ * For setting up the reserved areas, egress watermarks exist per port and per
+ * QoS class for both ingress and egress.
+ */
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_E
+ */
+#define BUF_Q_RSRV_E(port, prio) \
+ (BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_E
+ */
+#define BUF_P_RSRV_E(port) \
+ (BUF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_I
+ */
+#define BUF_Q_RSRV_I(port, prio) \
+ (BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_I
+ */
+#define BUF_P_RSRV_I(port) \
+ (BUF_xxxx_I + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_E
+ */
+#define REF_Q_RSRV_E(port, prio) \
+ (REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_E
+ */
+#define REF_P_RSRV_E(port) \
+ (REF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_I
+ */
+#define REF_Q_RSRV_I(port, prio) \
+ (REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_I
+ */
+#define REF_P_RSRV_I(port) \
+ (REF_xxxx_I + xxx_P_RSRV_x + (port))
+
+/* Sharing Watermarks
+ * ------------------
+ *
+ * The shared memory area is shared between all ports.
+ */
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_E
+ */
+#define BUF_PRIO_SHR_E(prio) \
+ (BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_E
+ */
+#define BUF_COL_SHR_E(dp) \
+ (BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_I
+ */
+#define BUF_PRIO_SHR_I(prio) \
+ (BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_I
+ */
+#define BUF_COL_SHR_I(dp) \
+ (BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_E
+ */
+#define REF_PRIO_SHR_E(prio) \
+ (REF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_E
+ */
+#define REF_COL_SHR_E(dp) \
+ (REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_I
+ */
+#define REF_PRIO_SHR_I(prio) \
+ (REF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_I
+ */
+#define REF_COL_SHR_I(dp) \
+ (REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
+
+static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
+{
+ int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);
+
+ return ocelot->ops->wm_dec(wm);
+}
+
+static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
+{
+ u32 wm = ocelot->ops->wm_enc(val);
+
+ ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
+}
+
+static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
+ u32 *maxuse)
+{
+ int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);
+
+ return ocelot->ops->wm_stat(res_stat, inuse, maxuse);
+}
+
+/* The hardware comes out of reset with strange defaults: the sum of all
+ * reservations for frame memory is larger than the total buffer size.
+ * One has to wonder how the reservation watermarks can still guarantee
+ * anything under congestion.
+ * Bring some sense into the hardware by changing the defaults to disable all
+ * reservations and rely only on the sharing watermark for frames with drop
+ * precedence 0. The user can still explicitly request reservations per port
+ * and per port-tc through devlink-sb.
+ */
+static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
+ int port)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
+ }
+
+ ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
+}
+
+/* We want the sharing watermarks to consume all nonreserved resources, for
+ * efficient resource utilization (a single traffic flow should be able to use
+ * up the entire buffer space and frame resources as long as there's no
+ * interference).
+ * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
+ * per color (drop precedence).
+ * The trouble with configuring these sharing watermarks is that:
+ * (1) There's a risk that we overcommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to the max
+ * (b) all 2 per-color sharing watermarks to the max
+ * (2) There's a risk that we undercommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to "max / 8"
+ * (b) all 2 per-color sharing watermarks to "max / 2"
+ * So for Linux, let's just disable the sharing watermarks per traffic class
+ * (setting them to 0 will make them always exceeded), and rely only on the
+ * sharing watermark for drop priority 0. So frames with drop priority set to 1
+ * by QoS classification or policing will still be allowed, but only as long as
+ * the port and port-TC reservations are not exceeded.
+ */
+static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
+ }
+}
+
+static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
+ u32 *buf_rsrv_e)
+{
+ int port, prio;
+
+ *buf_rsrv_i = 0;
+ *buf_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *buf_rsrv_i += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_I(port, prio));
+ *buf_rsrv_e += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_E(port, prio));
+ }
+
+ *buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
+ *buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
+ }
+
+ *buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
+ *buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
+}
+
+static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
+ u32 *ref_rsrv_e)
+{
+ int port, prio;
+
+ *ref_rsrv_i = 0;
+ *ref_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *ref_rsrv_i += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_I(port, prio));
+ *ref_rsrv_e += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_E(port, prio));
+ }
+
+ *ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
+ *ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
+ }
+}
+
+/* Calculate all reservations, then set up the sharing watermark for DP=0 to
+ * consume the remaining resources up to the pool's configured size.
+ */
+static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+ u32 buf_shr_i, buf_shr_e;
+ u32 ref_shr_i, ref_shr_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
+ buf_rsrv_i;
+ buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
+ buf_rsrv_e;
+ ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
+ ref_rsrv_i;
+ ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
+ ref_rsrv_e;
+
+ buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
+ buf_shr_e /= OCELOT_BUFFER_CELL_SZ;
+
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
+}
+
+/* Ensure that all reservations can be enforced */
+static int ocelot_watermark_validate(struct ocelot *ocelot,
+ struct netlink_ext_ack *extack)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/* The hardware works like this:
+ *
+ * Frame forwarding decision taken
+ * |
+ * v
+ * +--------------------+--------------------+--------------------+
+ * | | | |
+ * v v v v
+ * Ingress memory Egress memory Ingress frame Egress frame
+ * check check reference check reference check
+ * | | | |
+ * v v v v
+ * BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok
+ *(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok|
+ * (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok|
+ * (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok|
+ * (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v v v v v v v v
+ * fail success fail success fail success fail success
+ * | | | | | | | |
+ * v v v v v v v v
+ * +-----+----+ +-----+----+ +-----+----+ +-----+-----+
+ * | | | |
+ * +-------> OR <-------+ +-------> OR <-------+
+ * | |
+ * v v
+ * +----------------> AND <-----------------+
+ * |
+ * v
+ * FIFO drop / accept
+ *
+ * We are modeling each of the 4 parallel lookups as a devlink-sb pool.
+ * At least one (ingress or egress) memory pool and one (ingress or egress)
+ * frame reference pool need to have resources for frame acceptance to succeed.
+ *
+ * The following watermarks are controlled explicitly through devlink-sb:
+ * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
+ * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
+ * The following watermarks are controlled implicitly through devlink-sb:
+ * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
+ * The following watermarks are unused and disabled:
+ * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
+ *
+ * This function overrides the hardware defaults with more sane ones (no
+ * reservations by default, let sharing use all resources) and disables the
+ * unused watermarks.
+ */
+static void ocelot_watermark_init(struct ocelot *ocelot)
+{
+ int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
+ int port;
+
+ ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++)
+ ocelot_disable_reservation_watermarks(ocelot, port);
+
+ ocelot_disable_tc_sharing_watermarks(ocelot);
+ ocelot_setup_sharing_watermarks(ocelot);
+}
+
+/* Pool size and type are fixed up at runtime. We keep this structure to
+ * look up the cell size multipliers.
+ */
+static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
+ [OCELOT_SB_BUF] = {
+ .cell_size = OCELOT_BUFFER_CELL_SZ,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+ [OCELOT_SB_REF] = {
+ .cell_size = 1,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+};
+
+/* Returns the pool size configured through ocelot_sb_pool_set */
+int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ if (sb_index >= OCELOT_SB_NUM)
+ return -ENODEV;
+ if (pool_index >= OCELOT_SB_POOL_NUM)
+ return -ENODEV;
+
+ *pool_info = ocelot_sb_pool[sb_index];
+ pool_info->size = ocelot->pool_size[sb_index][pool_index];
+ if (pool_index)
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
+ else
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_get);
+
+/* The pool size received here configures the total amount of resources used on
+ * ingress (or on egress, depending upon the pool index). The pool size, minus
+ * the values for the port and port-tc reservations, is written into the
+ * COL_SHR(dp=0) sharing watermark.
+ */
+int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ u32 old_pool_size;
+ int err;
+
+ if (sb_index >= OCELOT_SB_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid sb, use 0 for buffers and 1 for frame references");
+ return -ENODEV;
+ }
+ if (pool_index >= OCELOT_SB_POOL_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid pool, use 0 for ingress and 1 for egress");
+ return -ENODEV;
+ }
+ if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only static threshold supported");
+ return -EOPNOTSUPP;
+ }
+
+ old_pool_size = ocelot->pool_size[sb_index][pool_index];
+ ocelot->pool_size[sb_index][pool_index] = size;
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot->pool_size[sb_index][pool_index] = old_pool_size;
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_set);
+
+/* This retrieves the configuration made with ocelot_sb_port_pool_set */
+int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_get);
+
+/* This configures the P_RSRV per-port reserved resource watermark */
+int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_set);
+
+/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
+int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ *p_pool_index = 0;
+ else
+ *p_pool_index = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);
+
+/* This configures the Q_RSRV per-port-tc reserved resource watermark */
+int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+ /* Sanity check: the pool index must match the requested direction */
+ if (pool_index == OCELOT_SB_POOL_ING &&
+ pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
+ return -EINVAL;
+ if (pool_index == OCELOT_SB_POOL_EGR &&
+ pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
+ return -EINVAL;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);
+
+/* The hardware does not support atomic snapshots; we read out the
+ * occupancy registers individually, so this is just a stub.
+ */
+int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_snapshot);
+
+/* The watermark occupancy registers are cleared upon read,
+ * so let's read them.
+ */
+int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
+{
+ u32 inuse, maxuse;
+ int port, prio;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ case OCELOT_SB_REF:
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_max_clear);
+
+/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
+int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);
+
+/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
+int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);
+
+int ocelot_devlink_sb_register(struct ocelot *ocelot)
+{
+ int err;
+
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
+ ocelot->packet_buffer_size, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err)
+ return err;
+
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
+ ocelot->num_frame_refs, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err) {
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ return err;
+ }
+
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;
+
+ ocelot_watermark_init(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_register);
+
+void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
+{
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_unregister);
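To make the index arithmetic and the sharing-watermark math in this file concrete, here is a worked example (OCELOT_NUM_TC assumed to be 8, all figures illustrative):

/* BUF_Q_RSRV_E(port 2, prio 3) = 2*256 + 0   + 8*2 + 3 = 531
 * REF_P_RSRV_I(port 5)         = 1*256 + 224 + 5       = 485
 *
 * The DP=0 sharing watermark is simply the pool size minus the sum of
 * all reservations, scaled to cells, as ocelot_setup_sharing_watermarks()
 * computes:
 */
static u32 demo_col_shr(u32 pool_size_bytes, u32 total_rsrv_bytes,
			u32 cell_size)
{
	return (pool_size_bytes - total_rsrv_bytes) / cell_size;
}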
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2bd2840d88bd..9553eb3e441c 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1,13 +1,190 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
+ * This contains glue logic between the switchdev driver operations and the
+ * mscc_ocelot_switch_lib.
+ *
* Copyright (c) 2017, 2019 Microsemi Corporation
+ * Copyright 2020-2021 NXP Semiconductors
*/
#include <linux/if_bridge.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
+{
+ return devlink_priv(dlp->devlink);
+}
+
+static int devlink_port_to_port(struct devlink_port *dlp)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+
+ return dlp - ocelot->devlink_ports;
+}
+
+static int ocelot_devlink_sb_pool_get(struct devlink *dl,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index,
+ u16 pool_index, u32 *p_cur,
+ u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int
+ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index,
+ tc_index, pool_type,
+ p_cur, p_max);
+}
+
+const struct devlink_ops ocelot_devlink_ops = {
+ .sb_pool_get = ocelot_devlink_sb_pool_get,
+ .sb_pool_set = ocelot_devlink_sb_pool_set,
+ .sb_port_pool_get = ocelot_devlink_sb_port_pool_get,
+ .sb_port_pool_set = ocelot_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get,
+};
+
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+ int id_len = sizeof(ocelot->base_mac);
+ struct devlink *dl = ocelot->devlink;
+ struct devlink_port_attrs attrs = {};
+
+ memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len);
+ attrs.switch_id.id_len = id_len;
+ attrs.phys.port_number = port;
+ attrs.flavour = flavour;
+
+ devlink_port_attrs_set(dlp, &attrs);
+
+ return devlink_port_register(dl, dlp, port);
+}
+
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+
+ devlink_port_unregister(dlp);
+}
+
+static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return &ocelot->devlink_ports[port];
+}
+
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
struct flow_cls_offload *f,
bool ingress)
@@ -457,7 +634,7 @@ static void ocelot_mact_work(struct work_struct *work)
break;
default:
break;
- };
+ }
kfree(w);
}
@@ -525,20 +702,6 @@ static void ocelot_set_rx_mode(struct net_device *dev)
__dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
}
-static int ocelot_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- int port = priv->chip_port;
- int ret;
-
- ret = snprintf(buf, len, "p%d", port);
- if (ret >= len)
- return -EINVAL;
-
- return 0;
-}
-
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -689,18 +852,6 @@ static int ocelot_set_features(struct net_device *dev,
return 0;
}
-static int ocelot_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
-
- ppid->id_len = sizeof(ocelot->base_mac);
- memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
-
- return 0;
-}
-
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -727,7 +878,6 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
.ndo_set_rx_mode = ocelot_set_rx_mode,
- .ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
.ndo_fdb_add = ocelot_port_fdb_add,
@@ -736,9 +886,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
- .ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_do_ioctl = ocelot_ioctl,
+ .ndo_get_devlink_port = ocelot_get_devlink_port,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -825,12 +975,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
- struct switchdev_trans *trans,
u8 state)
{
- if (switchdev_trans_ph_prepare(trans))
- return;
-
ocelot_bridge_stp_state_set(ocelot, port, state);
}
@@ -858,8 +1004,7 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
}
static int ocelot_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
@@ -868,15 +1013,13 @@ static int ocelot_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ocelot_port_attr_stp_state_set(ocelot, port, trans,
- attr->u.stp_state);
+ ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ocelot_port_vlan_filtering(ocelot, port,
- attr->u.vlan_filtering, trans);
+ ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
@@ -890,56 +1033,27 @@ static int ocelot_port_attr_set(struct net_device *dev,
}
static int ocelot_port_obj_add_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
-{
- int ret;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-
- if (switchdev_trans_ph_prepare(trans))
- ret = ocelot_vlan_vid_prepare(dev, vid, pvid,
- untagged);
- else
- ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int ocelot_port_vlan_del_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan)
+ const struct switchdev_obj_port_vlan *vlan)
{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int ret;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ret = ocelot_vlan_vid_del(dev, vid);
- if (ret)
- return ret;
- }
+ ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged);
+ if (ret)
+ return ret;
- return 0;
+ return ocelot_vlan_vid_add(dev, vlan->vid, pvid, untagged);
}
static int ocelot_port_obj_add_mdb(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return ocelot_port_mdb_add(ocelot, port, mdb);
}
@@ -956,7 +1070,6 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
static int ocelot_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
int ret = 0;
@@ -964,12 +1077,10 @@ static int ocelot_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_port_obj_add_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
return -EOPNOTSUPP;
@@ -985,8 +1096,8 @@ static int ocelot_port_obj_del(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- ret = ocelot_port_vlan_del_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
+ ret = ocelot_vlan_vid_del(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
@@ -1042,10 +1153,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
- if (!ocelot_netdevice_dev_check(dev))
- return 0;
-
if (event == NETDEV_PRECHANGEUPPER &&
+ ocelot_netdevice_dev_check(dev) &&
netif_is_lag_master(info->upper_dev)) {
struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
struct netlink_ext_ack *extack;
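[editor note] devlink_port_to_port() above works because the devlink_port structures are devm-allocated as one contiguous array, so the port index falls out of pointer subtraction rather than being stored per element. A standalone illustration of the same idiom:

#include <stdio.h>

struct devlink_port { int placeholder; };

static int port_index(const struct devlink_port *base,
		      const struct devlink_port *dlp)
{
	/* valid only while both pointers are into the same array */
	return dlp - base;
}

int main(void)
{
	struct devlink_port ports[4];

	printf("%d\n", port_index(ports, &ports[2]));	/* prints 2 */
	return 0;
}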
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 9cf2bc5f4289..30a38df08a21 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -517,7 +517,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->map = ocelot_regmap;
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
- ocelot->shared_queue_sz = 224 * 1024;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
@@ -764,9 +763,25 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
+static u16 ocelot_wm_dec(u16 wm)
+{
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+
+static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
.port_to_netdev = ocelot_port_to_netdev,
.netdev_to_port = ocelot_netdev_to_port,
};
@@ -1036,6 +1051,14 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.enable = ocelot_ptp_enable,
};
+static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_port_devlink_teardown(ocelot, port);
+}
+
static void mscc_ocelot_release_ports(struct ocelot *ocelot)
{
int port;
@@ -1063,28 +1086,44 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
struct device_node *portnp;
- int err;
+ bool *registered_ports;
+ int port, err;
+ u32 reg;
ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
if (!ocelot->ports)
return -ENOMEM;
+ ocelot->devlink_ports = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports,
+ sizeof(*ocelot->devlink_ports),
+ GFP_KERNEL);
+ if (!ocelot->devlink_ports)
+ return -ENOMEM;
+
+ registered_ports = kcalloc(ocelot->num_phys_ports, sizeof(bool),
+ GFP_KERNEL);
+ if (!registered_ports)
+ return -ENOMEM;
+
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct device_node *phy_node;
+ struct devlink_port *dlp;
phy_interface_t phy_mode;
struct phy_device *phy;
struct regmap *target;
struct resource *res;
struct phy *serdes;
char res_name[8];
- u32 port;
- if (of_property_read_u32(portnp, "reg", &port))
+ if (of_property_read_u32(portnp, "reg", &reg))
+ if (of_property_read_u32(portnp, "reg", &reg))
continue;
+ port = reg;
+
snprintf(res_name, sizeof(res_name), "port%d", port);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -1102,15 +1141,26 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
if (!phy)
continue;
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL);
+ if (err) {
+ of_node_put(portnp);
+ goto out_teardown;
+ }
+
err = ocelot_probe_port(ocelot, port, target, phy);
if (err) {
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
+ registered_ports[port] = true;
+
ocelot_port = ocelot->ports[port];
priv = container_of(ocelot_port, struct ocelot_port_private,
port);
+ dlp = &ocelot->devlink_ports[port];
+ devlink_port_type_eth_set(dlp, priv->dev);
of_get_phy_mode(portnp, &phy_mode);
@@ -1135,7 +1185,8 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_teardown;
}
serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@@ -1149,13 +1200,46 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
port);
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
priv->serdes = serdes;
}
+ /* Initialize unused devlink ports at the end */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (registered_ports[port])
+ continue;
+
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_UNUSED);
+ if (err) {
+ while (port--) {
+ if (registered_ports[port])
+ continue;
+ ocelot_port_devlink_teardown(ocelot, port);
+ }
+
+ goto out_teardown;
+ }
+ }
+
+ kfree(registered_ports);
+
return 0;
+
+out_teardown:
+ /* Unregister the network interfaces */
+ mscc_ocelot_release_ports(ocelot);
+ /* Tear down devlink ports for the registered network interfaces */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (!registered_ports[port])
+ continue;
+
+ ocelot_port_devlink_teardown(ocelot, port);
+ }
+ kfree(registered_ports);
+ return err;
}
static int mscc_ocelot_probe(struct platform_device *pdev)
@@ -1163,6 +1247,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int err, irq_xtr, irq_ptp_rdy;
struct device_node *ports;
+ struct devlink *devlink;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
@@ -1186,10 +1271,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (!np && !pdev->dev.platform_data)
return -ENODEV;
- ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
- if (!ocelot)
+ devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
+ if (!devlink)
return -ENOMEM;
+ ocelot = devlink_priv(devlink);
+ ocelot->devlink = priv_to_devlink(ocelot);
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
@@ -1206,7 +1293,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = NULL;
continue;
}
- return PTR_ERR(target);
+ err = PTR_ERR(target);
+ goto out_free_devlink;
}
ocelot->targets[io_target[i].id] = target;
@@ -1215,24 +1303,25 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
- return PTR_ERR(hsio);
+ err = PTR_ERR(hsio);
+ goto out_free_devlink;
}
ocelot->targets[HSIO] = hsio;
err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
- return err;
+ goto out_free_devlink;
irq_xtr = platform_get_irq_byname(pdev, "xtr");
- if (irq_xtr < 0)
- return -ENODEV;
+ if (irq_xtr < 0) {
+ err = irq_xtr;
+ goto out_free_devlink;
+ }
err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
- return err;
+ goto out_free_devlink;
irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
@@ -1241,7 +1330,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
IRQF_ONESHOT, "ptp ready",
ocelot);
if (err)
- return err;
+ goto out_free_devlink;
/* Both the PTP interrupt and the PTP bank are available */
ocelot->ptp = 1;
@@ -1250,7 +1339,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(ocelot->dev, "no ethernet-ports child node found\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_free_devlink;
}
ocelot->num_phys_ports = of_get_child_count(ports);
@@ -1265,10 +1355,18 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
- err = mscc_ocelot_init_ports(pdev, ports);
+ err = devlink_register(devlink, ocelot->dev);
if (err)
goto out_ocelot_deinit;
+ err = mscc_ocelot_init_ports(pdev, ports);
+ if (err)
+ goto out_ocelot_devlink_unregister;
+
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ goto out_ocelot_release_ports;
+
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
@@ -1288,10 +1386,17 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
return 0;
+out_ocelot_release_ports:
+ mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+out_ocelot_devlink_unregister:
+ devlink_unregister(devlink);
out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
+out_free_devlink:
+ devlink_free(devlink);
return err;
}
@@ -1300,11 +1405,15 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit_timestamp(ocelot);
+ ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
+ devlink_free(ocelot->devlink);
return 0;
}
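[editor note] The new wm_dec()/wm_stat() callbacks invert the encoding already used by ocelot_wm_enc(): watermarks below 256 cells are stored verbatim, larger ones set bit 8 and keep the value divided by 16. A self-contained round trip, showing the 16-cell granularity loss for large values:

#include <stdio.h>

static unsigned int wm_enc(unsigned int value)
{
	if (value >= (1u << 8))
		return (1u << 8) | (value / 16);
	return value;
}

static unsigned int wm_dec(unsigned int wm)
{
	if (wm & (1u << 8))
		return (wm & 0xff) * 16;
	return wm;
}

int main(void)
{
	unsigned int v = 1000;

	/* prints enc=0x13e dec=992: the /16 step makes it lossy */
	printf("enc=0x%x dec=%u\n", wm_enc(v), wm_dec(wm_enc(v)));
	return 0;
}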
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 776b7d264dc3..2289e1fe3741 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out;
+ goto undo_probe;
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
- goto out;
+ goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index afa166ff7aef..28d9e98db81a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
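[editor note] All three sonic probe paths (macsonic platform, macsonic nubus, xtsonic) gain the same unwind: the DMA descriptor area allocated earlier must be freed when register_netdev() fails, before free_netdev(). A compilable toy model of the ordering, with stand-in functions (hypothetical names) in place of the real allocator and registration calls:

#include <stdio.h>

static int alloc_descriptors(void) { return 0; }		/* dma_alloc_coherent() */
static void free_descriptors(void) { puts("descriptors freed"); }
static int register_netdev_stub(void) { return -19; }		/* forced failure */

static int demo_probe(void)
{
	int err = alloc_descriptors();

	if (err)
		return err;

	err = register_netdev_stub();
	if (err)
		goto undo_probe;
	return 0;

undo_probe:
	free_descriptors();	/* leaked before this patch */
	return err;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}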
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 0a721f6e8676..e31f8fbbc696 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3109,13 +3109,19 @@ mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
return 0;
}
-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, false);
}
-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, true);
}
@@ -3475,8 +3481,8 @@ static const instr_cb_t instr_cb[256] = {
[BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
[BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
- [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
- [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
+ [BPF_STX | BPF_ATOMIC | BPF_W] = mem_atomic4,
+ [BPF_STX | BPF_ATOMIC | BPF_DW] = mem_atomic8,
[BPF_ST | BPF_MEM | BPF_B] = mem_st1,
[BPF_ST | BPF_MEM | BPF_H] = mem_st2,
[BPF_ST | BPF_MEM | BPF_W] = mem_st4,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index fac9c6f9e197..d0e17eebddd9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -428,9 +428,9 @@ static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}
-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
{
- return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
}
static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
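[editor note] is_mbpf_atomic() works because an eBPF opcode byte packs class, size and mode into disjoint bit fields, so masking out the size bits leaves exactly "STX class + atomic mode". A userspace check using the UAPI constant values:

#include <stdio.h>

/* low 3 bits = class, bits 3-4 = size, top 3 bits = mode */
#define BPF_SIZE_MASK	0x18
#define BPF_STX		0x03
#define BPF_W		0x00
#define BPF_DW		0x18
#define BPF_ATOMIC	0xc0	/* same value the old BPF_XADD had */

static int is_atomic(unsigned int code)
{
	return (code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
}

int main(void)
{
	/* both sizes classify as atomic; a plain store does not */
	printf("%d %d %d\n",
	       is_atomic(BPF_STX | BPF_ATOMIC | BPF_W),
	       is_atomic(BPF_STX | BPF_ATOMIC | BPF_DW),
	       is_atomic(BPF_STX | 0x60 /* BPF_MEM */ | BPF_W));
	return 0;
}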
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index e92ee510fd52..9d235c0ce46a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -479,7 +479,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
pr_vlog(env, "map writes not supported\n");
return -EOPNOTSUPP;
}
- if (is_mbpf_xadd(meta)) {
+ if (is_mbpf_atomic(meta)) {
err = nfp_bpf_map_mark_used(env, meta, reg,
NFP_MAP_USE_ATOMIC_CNT);
if (err)
@@ -523,12 +523,17 @@ exit_check_ptr:
}
static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
{
const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
+ if (meta->insn.imm != BPF_ADD) {
+ pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+ return -EOPNOTSUPP;
+ }
+
if (dreg->type != PTR_TO_MAP_VALUE) {
pr_vlog(env, "atomic add not to a map value pointer: %d\n",
dreg->type);
@@ -655,8 +660,8 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
if (is_mbpf_store(meta))
return nfp_bpf_check_store(nfp_prog, meta, env);
- if (is_mbpf_xadd(meta))
- return nfp_bpf_check_xadd(nfp_prog, meta, env);
+ if (is_mbpf_atomic(meta))
+ return nfp_bpf_check_atomic(nfp_prog, meta, env);
if (is_mbpf_alu(meta))
return nfp_bpf_check_alu(nfp_prog, meta, env);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f21fb573ea3e..eeb30680b4dc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1822,8 +1822,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
- xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
@@ -1914,10 +1914,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
unsigned int dma_off;
int act;
- xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
- xdp.data = orig_data;
- xdp.data_meta = orig_data;
- xdp.data_end = orig_data + pkt_len;
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -3656,8 +3656,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = nfp_net_xdp,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
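[editor note] xdp_init_buff()/xdp_prepare_buff() replace the open-coded field assignments: init sets the per-ring constants once per NAPI poll, prepare fills the per-packet pointers. A userspace mirror of the two helpers (fields trimmed to the ones they touch; the real static inlines live in include/net/xdp.h):

#include <stdio.h>
#include <stdint.h>

struct xdp_rxq_info { int queue_index; };

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	uint32_t frame_sz;
};

static void xdp_init_buff(struct xdp_buff *xdp, uint32_t frame_sz,
			  struct xdp_rxq_info *rxq)
{
	/* per-ring constants, set once outside the RX loop */
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
}

static void xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
			     int headroom, int data_len, int meta_valid)
{
	unsigned char *data = hard_start + headroom;

	/* per-packet pointers; data + 1 marks "no metadata" */
	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}

int main(void)
{
	static unsigned char frame[2048];
	struct xdp_rxq_info rxq = { 0 };
	struct xdp_buff xdp;

	xdp_init_buff(&xdp, sizeof(frame), &rxq);
	xdp_prepare_buff(&xdp, frame, 256, 64, 1);
	printf("payload: %td bytes\n",
	       (char *)xdp.data_end - (char *)xdp.data);	/* 64 */
	return 0;
}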
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 9156c9825a16..162a1ff1e9d2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int i, j;
unsigned int len;
- len = netdev->mtu + ETH_HLEN;
+ len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
@@ -979,7 +979,7 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
stats->vlan_inserted++;
}
- if (skb->csum_not_inet)
+ if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
stats->csum++;
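[editor note] The ionic_rx_fill() change sizes receive buffers for a worst-case tagged frame instead of a bare MTU-sized one. With the default MTU this still fits in a single page:

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define VLAN_HLEN	4	/* 802.1Q tag */
#define PAGE_SIZE	4096

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int len = mtu + ETH_HLEN + VLAN_HLEN;		/* 1518 */
	unsigned int nfrags = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("len=%u nfrags=%u\n", len, nfrags);	/* len=1518 nfrags=1 */
	return 0;
}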
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 4366c7a8de95..6b5ddb07ee83 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -78,6 +78,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select CRC32
select NET_DEVLINK
help
This enables the support for Marvell FastLinQ adapters family.
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 5e9f8ee99800..2fcbcecb41d1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -113,7 +113,8 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
return NX_RCODE_INVALID_ARGS;
}
- addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &md_template_addr, GFP_KERNEL);
if (!addr) {
dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
return -ENOMEM;
@@ -133,7 +134,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
cmd.rsp.cmd, size, cmd.rsp.arg2);
}
- pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+ dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
return 0;
}
@@ -281,14 +282,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
rsp_size =
SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
- addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &hostrq_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
- addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &cardrsp_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL) {
err = -ENOMEM;
goto out_free_rq;
@@ -387,9 +388,10 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
recv_ctx->virt_port = prsp->virt_port;
out_free_rsp:
- pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
}
@@ -429,14 +431,14 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
struct netxen_cmd_args cmd;
rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
- rq_addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &rq_phys_addr);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
- rsp_addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &rsp_phys_addr);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
@@ -491,10 +493,11 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
err = -EIO;
}
- pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
return err;
}
@@ -745,9 +748,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
recv_ctx = &adapter->recv_ctx;
tx_ring = adapter->tx_ring;
- addr = pci_alloc_consistent(pdev,
- sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
- &recv_ctx->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &recv_ctx->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate hw context\n");
return -ENOMEM;
@@ -762,8 +765,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
/* cmd desc ring */
- addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -776,9 +779,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate rds ring [%d]\n",
@@ -797,9 +800,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate sds ring [%d]\n",
@@ -874,19 +877,17 @@ done:
recv_ctx = &adapter->recv_ctx;
if (recv_ctx->hwctx != NULL) {
- pci_free_consistent(adapter->pdev,
- sizeof(struct netxen_ring_ctx) +
- sizeof(uint32_t),
- recv_ctx->hwctx,
- recv_ctx->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ recv_ctx->hwctx, recv_ctx->phys_addr);
recv_ctx->hwctx = NULL;
}
tx_ring = adapter->tx_ring;
if (tx_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
tx_ring->desc_head = NULL;
}
@@ -894,10 +895,10 @@ done:
rds_ring = &recv_ctx->rds_rings[ring];
if (rds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- rds_ring->desc_head,
- rds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
rds_ring->desc_head = NULL;
}
}
@@ -906,10 +907,10 @@ done:
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- sds_ring->desc_head,
- sds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
sds_ring->desc_head = NULL;
}
}
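[editor note] The netxen conversions are mechanical because pci_alloc_consistent() and friends were thin wrappers over the generic DMA API. For reference, the compat wrappers being open-coded away look like this (from include/linux/pci-dma-compat.h); note the alloc wrapper hard-codes GFP_ATOMIC, so using GFP_KERNEL at these process-context call sites is a mild relaxation rather than a 1:1 translation:

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
		     dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
				  GFP_ATOMIC);
}

static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
		    void *vaddr, dma_addr_t dma_handle)
{
	dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
}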
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 94546ed5f867..08f9477d2ee8 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -102,10 +102,8 @@ void netxen_release_rx_buffers(struct netxen_adapter *adapter)
rx_buf = &(rds_ring->rx_buf_arr[i]);
if (rx_buf->state == NETXEN_BUFFER_FREE)
continue;
- pci_unmap_single(adapter->pdev,
- rx_buf->dma,
- rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
+ rds_ring->dma_size, DMA_FROM_DEVICE);
if (rx_buf->skb != NULL)
dev_kfree_skb_any(rx_buf->skb);
}
@@ -124,16 +122,16 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
if (buffrag->dma) {
- pci_unmap_single(adapter->pdev, buffrag->dma,
- buffrag->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
+ buffrag->length, DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
- pci_unmap_page(adapter->pdev, buffrag->dma,
- buffrag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ buffrag->dma, buffrag->length,
+ DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
}
@@ -1250,9 +1248,10 @@ int netxen_init_dummy_dma(struct netxen_adapter *adapter)
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
- adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- &adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ &adapter->dummy_dma.phys_addr,
+ GFP_KERNEL);
if (adapter->dummy_dma.addr == NULL) {
dev_err(&adapter->pdev->dev,
"ERROR: Could not allocate dummy DMA memory\n");
@@ -1304,10 +1303,10 @@ void netxen_free_dummy_dma(struct netxen_adapter *adapter)
}
if (i) {
- pci_free_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- adapter->dummy_dma.addr,
- adapter->dummy_dma.phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
adapter->dummy_dma.addr = NULL;
} else
dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
@@ -1467,10 +1466,10 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
- dma = pci_map_single(pdev, skb->data,
- rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+ dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(pdev, dma)) {
+ if (dma_mapping_error(&pdev->dev, dma)) {
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return 1;
@@ -1491,8 +1490,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
buffer = &rds_ring->rx_buf_arr[index];
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
skb = buffer->skb;
if (!skb)
@@ -1754,13 +1753,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
buffer = &tx_ring->cmd_buf_arr[sw_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
- pci_unmap_single(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, frag->dma, frag->length,
+ DMA_TO_DEVICE);
frag->dma = 0ULL;
for (i = 1; i < buffer->frag_count; i++) {
frag++; /* Get the next frag */
- pci_unmap_page(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, frag->dma,
+ frag->length, DMA_TO_DEVICE);
frag->dma = 0ULL;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f21847739ef1..7e6bac85495d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -243,8 +243,8 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter)
cmask = mask;
}
- if (pci_set_dma_mask(pdev, mask) == 0 &&
- pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ if (dma_set_mask(&pdev->dev, mask) == 0 &&
+ dma_set_coherent_mask(&pdev->dev, cmask) == 0) {
adapter->pci_using_dac = 1;
return 0;
}
@@ -277,13 +277,13 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
mask = DMA_BIT_MASK(32+shift);
- err = pci_set_dma_mask(pdev, mask);
+ err = dma_set_mask(&pdev->dev, mask);
if (err)
goto err_out;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- err = pci_set_consistent_dma_mask(pdev, mask);
+ err = dma_set_coherent_mask(&pdev->dev, mask);
if (err)
goto err_out;
}
@@ -293,8 +293,8 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
return 0;
err_out:
- pci_set_dma_mask(pdev, old_mask);
- pci_set_consistent_dma_mask(pdev, old_cmask);
+ dma_set_mask(&pdev->dev, old_mask);
+ dma_set_coherent_mask(&pdev->dev, old_cmask);
return err;
}
@@ -564,11 +564,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_set_features = netxen_set_features,
};
-static inline bool netxen_function_zero(struct pci_dev *pdev)
-{
- return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
-}
-
static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
u32 mode)
{
@@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter)
netxen_initialize_interrupt_registers(adapter);
netxen_set_msix_bit(pdev, 0);
- if (netxen_function_zero(pdev)) {
+ if (adapter->portnum == 0) {
if (!netxen_setup_msi_interrupts(adapter, num_msix))
netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
else
@@ -1983,9 +1978,9 @@ netxen_map_tx_skb(struct pci_dev *pdev,
nr_frags = skb_shinfo(skb)->nr_frags;
nf = &pbuf->frag_array[0];
- map = pci_map_single(pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, map))
+ map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
goto out_err;
nf->dma = map;
@@ -2009,12 +2004,12 @@ netxen_map_tx_skb(struct pci_dev *pdev,
unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
- pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
- pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
nf->dma = 0ULL;
out_err:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index a2494bf85007..70c8d3cd85c0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1090,12 +1090,9 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data_hard_start = page_address(bd->data);
- xdp.data = xdp.data_hard_start + *data_offset;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.rxq = &rxq->xdp_rxq;
- xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
+ xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+ *len, false);
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
@@ -1799,6 +1796,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device or at least unsupported natively;
+ * offloads for them can't be done trivially, so disable them for such skbs.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9cf960a6d007..4bf94797aac5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -663,8 +663,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
@@ -688,8 +686,6 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
};
@@ -707,8 +703,6 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
.ndo_xdp_xmit = qede_xdp_transmit,
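[editor note] The removed .ndo_udp_tunnel_add/del hooks were leftovers: qede already describes its tunnel port tables through the udp_tunnel_nic infrastructure, and the core invokes the driver callbacks itself. A rough kernel-style sketch of such a registration (callback names hypothetical; qede's actual table sizes are not visible in this diff):

static int demo_set_port(struct net_device *dev, unsigned int table,
			 unsigned int entry, struct udp_tunnel_info *ti)
{
	/* program ti->port for ti->type into the hardware */
	return 0;
}

static int demo_unset_port(struct net_device *dev, unsigned int table,
			   unsigned int entry, struct udp_tunnel_info *ti)
{
	/* remove the port from the hardware */
	return 0;
}

static const struct udp_tunnel_nic_info demo_udp_tunnels = {
	.set_port	= demo_set_port,
	.unset_port	= demo_unset_port,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE },
	},
};

	/* in probe, instead of the two ndo hooks: */
	netdev->udp_tunnel_nic_info = &demo_udp_tunnels;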
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 27740c027681..214e347097a7 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -315,12 +315,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
* buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1802,13 +1801,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
* first buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1943,18 +1941,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
goto invalid_seg_count;
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[i],
- mapaddr),
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[i], mapaddr),
dma_unmap_len(&tx_cb->map[i], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
}
qdev->ndev->stats.tx_packets++;
@@ -2021,10 +2017,9 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
qdev->ndev->stats.rx_bytes += length;
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb->data);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
@@ -2067,10 +2062,9 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb2 = lrg_buf_cb2->skb;
skb_put(skb2, length); /* Just the second buffer length here. */
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb2->data);
skb_checksum_none_assert(skb2);
@@ -2319,9 +2313,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
err);
@@ -2357,11 +2351,11 @@ static int ql_send_map(struct ql3_adapter *qdev,
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- map = pci_map_single(qdev->pdev, oal,
+ map = dma_map_single(&qdev->pdev->dev, oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
@@ -2423,24 +2417,24 @@ map_error:
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[seg], mapaddr),
- dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ DMA_TO_DEVICE);
oal++;
seg++;
}
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[seg], mapaddr),
dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
dma_unmap_addr(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return NETDEV_TX_BUSY;
@@ -2525,9 +2519,8 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
wmb();
qdev->req_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->req_q_size,
- &qdev->req_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ &qdev->req_q_phy_addr, GFP_KERNEL);
if ((qdev->req_q_virt_addr == NULL) ||
LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
@@ -2536,16 +2529,14 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
}
qdev->rsp_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->rsp_q_size,
- &qdev->rsp_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr, GFP_KERNEL);
if ((qdev->rsp_q_virt_addr == NULL) ||
LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
netdev_err(qdev->ndev, "rspQ allocation failed\n");
- pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
- qdev->req_q_virt_addr,
- qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
return -ENOMEM;
}
@@ -2561,15 +2552,13 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
return;
}
- pci_free_consistent(qdev->pdev,
- qdev->req_q_size,
- qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
qdev->req_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->rsp_q_size,
- qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
qdev->rsp_q_virt_addr = NULL;
@@ -2593,9 +2582,9 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
return -ENOMEM;
qdev->lrg_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- &qdev->lrg_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
@@ -2613,15 +2602,16 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
qdev->small_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- &qdev->small_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->small_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
- pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
return -ENOMEM;
}
@@ -2638,17 +2628,15 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
return;
}
kfree(qdev->lrg_buf);
- pci_free_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
qdev->lrg_buf_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- qdev->small_buf_q_alloc_virt_addr,
- qdev->small_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
qdev->small_buf_q_virt_addr = NULL;
@@ -2666,9 +2654,9 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
QL_SMALL_BUFFER_SIZE);
qdev->small_buf_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- &qdev->small_buf_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr, GFP_KERNEL);
if (qdev->small_buf_virt_addr == NULL) {
netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
@@ -2701,10 +2689,10 @@ static void ql_free_small_buffers(struct ql3_adapter *qdev)
return;
}
if (qdev->small_buf_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- qdev->small_buf_virt_addr,
- qdev->small_buf_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
qdev->small_buf_virt_addr = NULL;
}
@@ -2719,10 +2707,10 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
lrg_buf_cb = &qdev->lrg_buf[i];
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb, mapaddr),
dma_unmap_len(lrg_buf_cb, maplen),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
break;
@@ -2774,13 +2762,11 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
* buffer
*/
skb_reserve(skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
- skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -2865,8 +2851,8 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
* Network Completion Queue Producer Index Register
*/
qdev->shadow_reg_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->shadow_reg_phy_addr, GFP_KERNEL);
if (qdev->shadow_reg_virt_addr != NULL) {
qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
@@ -2921,10 +2907,9 @@ err_small_buffers:
err_buffer_queues:
ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
return -ENOMEM;
}
@@ -2937,10 +2922,9 @@ static void ql_free_mem_resources(struct ql3_adapter *qdev)
ql_free_buffer_queues(qdev);
ql_free_net_req_rsp_queues(qdev);
if (qdev->shadow_reg_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
qdev->shadow_reg_virt_addr = NULL;
}
}
@@ -3641,18 +3625,15 @@ static void ql_reset_work(struct work_struct *work)
if (tx_cb->skb) {
netdev_printk(KERN_DEBUG, ndev,
"Freeing lost SKB\n");
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[0],
- mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ DMA_TO_DEVICE);
for (j = 1; j < tx_cb->seg_count; j++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[j],
- mapaddr),
- dma_unmap_len(&tx_cb->map[j],
- maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[j], mapaddr),
+ dma_unmap_len(&tx_cb->map[j], maplen),
+ DMA_TO_DEVICE);
}
dev_kfree_skb(tx_cb->skb);
tx_cb->skb = NULL;
@@ -3784,13 +3765,10 @@ static int ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
pci_using_dac = 0;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
if (err) {
pr_err("%s no usable DMA configuration\n", pci_name(pdev));
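The hunk above moves qla3xxx from the legacy pci_* DMA API to the generic dma_* API. For reference, a minimal sketch (hypothetical helper, not part of the patch) of the probe-time idiom it converts to: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, and the driver falls back from 64 to 32 bit.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* hypothetical helper illustrating the modern DMA-mask idiom */
    static int example_setup_dma_masks(struct pci_dev *pdev)
    {
            /* try 64-bit streaming + coherent masks first */
            if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                    return 0;

            /* fall back to 32 bit; failure here means no usable DMA */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                    dev_err(&pdev->dev, "no usable DMA configuration\n");
                    return -EIO;
            }
            return 0;
    }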
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c2faf96fcade..96b947fde646 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -520,8 +520,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qlcnic_features_check,
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
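The two ndo_udp_tunnel_* hooks can be dropped because the udp_tunnel_nic core now drives port offload through the udp_tunnel_nic_info table a driver attaches to its net_device. A minimal sketch under assumed names (the example_* callbacks and table size are illustrative, not from this patch):

    #include <net/udp_tunnel.h>

    /* hypothetical callback: program ti->port/ti->type into hardware */
    static int example_set_port(struct net_device *dev, unsigned int table,
                                unsigned int entry, struct udp_tunnel_info *ti)
    {
            return 0;
    }

    /* hypothetical callback: remove the port from hardware */
    static int example_unset_port(struct net_device *dev, unsigned int table,
                                  unsigned int entry, struct udp_tunnel_info *ti)
    {
            return 0;
    }

    static const struct udp_tunnel_nic_info example_udp_tunnels = {
            .set_port   = example_set_port,
            .unset_port = example_unset_port,
            .tables = {
                    { .n_entries = 8, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
            },
    };

A driver then sets dev->udp_tunnel_nic_info = &example_udp_tunnels once at probe time instead of implementing the ndo hooks.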
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46d8510b2fe2..475e6f01ea10 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -28,6 +28,7 @@
#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
+#include <asm/unaligned.h>
#include <net/ip6_checksum.h>
#include "r8169.h"
@@ -260,6 +261,9 @@ enum rtl8168_8101_registers {
#define CSIAR_BYTE_ENABLE 0x0000f000
#define CSIAR_ADDR_MASK 0x00000fff
PMCH = 0x6f,
+#define D3COLD_NO_PLL_DOWN BIT(7)
+#define D3HOT_NO_PLL_DOWN BIT(6)
+#define D3_NO_PLL_DOWN (BIT(7) | BIT(6))
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
#define EPHYAR_WRITE_CMD 0x80000000
@@ -529,6 +533,9 @@ enum rtl_rx_desc_bit {
IPFail = (1 << 16), /* IP checksum failed */
UDPFail = (1 << 15), /* UDP/IP checksum failed */
TCPFail = (1 << 14), /* TCP/IP checksum failed */
+
+#define RxCSFailMask (IPFail | UDPFail | TCPFail)
+
RxVlanTag = (1 << 16), /* VLAN tag available */
};
@@ -584,6 +591,12 @@ enum rtl_flag {
RTL_FLAG_MAX
};
+enum rtl_dash_type {
+ RTL_DASH_NONE,
+ RTL_DASH_DP,
+ RTL_DASH_EP,
+};
+
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
@@ -591,6 +604,7 @@ struct rtl8169_private {
struct phy_device *phydev;
struct napi_struct napi;
enum mac_version mac_version;
+ enum rtl_dash_type dash_type;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_tx;
@@ -746,14 +760,75 @@ static const struct rtl_cond name = { \
\
static bool name ## _check(struct rtl8169_private *tp)
-static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
+static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
- if (reg & 0xffff0001) {
- if (net_ratelimit())
- netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
- return true;
- }
- return false;
+ /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
+ *cmd |= 0x7f0 << 18;
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
+}
+
+static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
+{
+ u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
+
+ if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask))
+ return;
+
+ RTL_W32(tp, ERIDR, val);
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+}
+
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val)
+{
+ _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
+}
+
+static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
+{
+ u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
+
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(tp, ERIDR) : ~0;
+}
+
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
+{
+ return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
+}
+
+static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
+{
+ u32 val = rtl_eri_read(tp, addr);
+
+ rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
+}
+
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
+{
+ rtl_w0w1_eri(tp, addr, p, 0);
+}
+
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
+{
+ rtl_w0w1_eri(tp, addr, 0, m);
+}
+
+static bool rtl_ocp_reg_failure(u32 reg)
+{
+ return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg);
}
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
@@ -763,7 +838,7 @@ DECLARE_RTL_COND(rtl_ocp_gphy_cond)
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
@@ -773,7 +848,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, GPHY_OCP, reg << 15);
@@ -784,7 +859,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
@@ -792,7 +867,7 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, OCPDR, reg << 15);
@@ -808,6 +883,25 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}
+/* Work around a hardware issue with the RTL8168g PHY: the quirk
+ * disables PHY MCU interrupts before PHY power-down.
+ */
+static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_49:
+ if (value & BMCR_RESET || !(value & BMCR_PDOWN))
+ rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
+ else
+ rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
+ break;
+ default:
+ break;
+ }
+}
+
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
if (reg == 0x1f) {
@@ -818,6 +912,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
if (tp->ocp_base != OCP_STD_PHY_BASE)
reg -= 0x10;
+ if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR)
+ rtl8168g_phy_suspend_quirk(tp, value);
+
r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
@@ -1009,70 +1106,6 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
-{
- /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
- *cmd |= 0x7f0 << 18;
-}
-
-DECLARE_RTL_COND(rtl_eriar_cond)
-{
- return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
-}
-
-static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val, int type)
-{
- u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
-
- BUG_ON((addr & 3) || (mask == 0));
- RTL_W32(tp, ERIDR, val);
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
-}
-
-static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val)
-{
- _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
-}
-
-static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
-{
- u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
-
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
- RTL_R32(tp, ERIDR) : ~0;
-}
-
-static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
-{
- return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
-}
-
-static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
-{
- u32 val = rtl_eri_read(tp, addr);
-
- rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
-}
-
-static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
-{
- rtl_w0w1_eri(tp, addr, p, 0);
-}
-
-static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
-{
- rtl_w0w1_eri(tp, addr, 0, m);
-}
-
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
{
RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
@@ -1158,19 +1191,10 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_start(tp);
- break;
- default:
- BUG();
- break;
- }
}
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
@@ -1189,44 +1213,51 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_stop(tp);
- break;
- default:
- BUG();
- break;
- }
}
static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(r8168dp_ocp_read(tp, reg) & 0x00008000);
+ return r8168dp_ocp_read(tp, reg) & BIT(15);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return r8168ep_ocp_read(tp, 0x128) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x128) & BIT(0);
}
-static bool r8168_check_dash(struct rtl8169_private *tp)
+static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
- return r8168dp_check_dash(tp);
+ return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
- return r8168ep_check_dash(tp);
+ return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
default:
- return false;
+ return RTL_DASH_NONE;
+ }
+}
+
+static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ if (enable)
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
+ else
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
+ break;
+ default:
+ break;
}
}
@@ -1396,6 +1427,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ rtl_set_d3_pll_down(tp, !wolopts);
tp->dev->wol_enabled = wolopts ? 1 : 0;
}
@@ -1962,7 +1994,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
/* 8168DP family. */
- { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ /* It seems this early RTL8168dp version never made it to
+ * the wild. Let's see whether somebody complains; if not,
+ * we'll remove support for this chip version completely.
+ * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ */
{ 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
{ 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
@@ -2081,18 +2117,12 @@ static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
-static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
- const u16 w[] = {
- addr[0] | (addr[1] << 8),
- addr[2] | (addr[3] << 8),
- addr[4] | (addr[5] << 8)
- };
-
- rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
- rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
- rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
- rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
+ rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
+ rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4));
+ rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16);
+ rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2));
}
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
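rtl_rar_exgmac_set() above now leans on the byte-order helpers from <asm/unaligned.h> (included at the top of the file) instead of assembling the words by hand. A small hypothetical helper showing that the two forms carry the same bytes:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* for addr = 00:11:22:33:44:55 this yields lo = 0x33221100 and
     * hi = 0x5544, exactly what the old open-coded shifts built */
    static void example_mac_to_le_words(const u8 *addr, u32 *lo, u32 *hi)
    {
            *lo = get_unaligned_le32(addr);     /* addr[0..3], LE */
            *hi = get_unaligned_le16(addr + 4); /* addr[4..5], LE */
    }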
@@ -2142,14 +2172,14 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
genphy_soft_reset(tp->phydev);
}
-static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_unlock_config_regs(tp);
- RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
+ RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4));
rtl_pci_commit(tp);
- RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
+ RTL_W32(tp, MAC0, get_unaligned_le32(addr));
rtl_pci_commit(tp);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
@@ -2172,28 +2202,16 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+static void rtl_wol_enable_rx(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63:
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
- break;
- default:
- break;
- }
}
-static void rtl_pll_power_down(struct rtl8169_private *tp)
+static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
@@ -2202,62 +2220,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
if (device_may_wakeup(tp_to_dev(tp))) {
phy_speed_down(tp->phydev, false);
- rtl_wol_suspend_quirk(tp);
- return;
+ rtl_wol_enable_rx(tp);
}
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- default:
- break;
- }
-}
-
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- break;
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
- break;
- default:
- break;
- }
-
- phy_resume(tp->phydev);
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
@@ -2338,13 +2302,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
+ int readrq = 4096;
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168b_1_hw_jumbo_enable(tp);
} else {
r8168b_1_hw_jumbo_disable(tp);
@@ -2352,7 +2317,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168c_hw_jumbo_enable(tp);
} else {
r8168c_hw_jumbo_disable(tp);
@@ -2365,20 +2330,18 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
r8168dp_hw_jumbo_disable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ if (jumbo)
r8168e_hw_jumbo_enable(tp);
- } else {
+ else
r8168e_hw_jumbo_disable(tp);
- }
break;
default:
break;
}
rtl_lock_config_regs(tp);
- if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
- pcie_set_readrq(tp->pci_dev, 4096);
+ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ pcie_set_readrq(tp->pci_dev, readrq);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4406,10 +4369,9 @@ static inline int rtl8169_fragmented_frame(u32 status)
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- u32 status = opts1 & RxProtoMask;
+ u32 status = opts1 & (RxProtoMask | RxCSFailMask);
- if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)))
+ if (status == RxProtoTCP || status == RxProtoUDP)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
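The trick in rtl8169_rx_csum() is that folding the *Fail bits into the mask lets a single equality test cover both "protocol is TCP/UDP" and "no checksum failure": any set fail bit survives the mask and breaks the comparison. A standalone demonstration; the *Fail values match the driver, while RxProtoMask/RxProtoTCP below are placeholder assumptions so the test compiles on its own:

    #include <assert.h>
    #include <stdint.h>

    #define IPFail        (1u << 16)
    #define UDPFail       (1u << 15)
    #define TCPFail       (1u << 14)
    #define RxCSFailMask  (IPFail | UDPFail | TCPFail)
    #define RxProtoMask   (3u << 17)   /* assumed value */
    #define RxProtoTCP    (1u << 17)   /* assumed value */

    int main(void)
    {
            uint32_t good = RxProtoTCP;           /* TCP, fail bits clear */
            uint32_t bad  = RxProtoTCP | TCPFail; /* TCP, csum failed */

            assert((good & (RxProtoMask | RxCSFailMask)) == RxProtoTCP);
            assert((bad  & (RxProtoMask | RxCSFailMask)) != RxProtoTCP);
            return 0;
    }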
@@ -4616,12 +4578,12 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl8169_cleanup(tp, true);
- rtl_pll_power_down(tp);
+ rtl_prepare_power_down(tp);
}
static void rtl8169_up(struct rtl8169_private *tp)
{
- rtl_pll_power_up(tp);
+ phy_resume(tp->phydev);
rtl8169_init_phy(tp);
napi_enable(&tp->napi);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -4888,12 +4850,10 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl_rar_set(tp, tp->dev->perm_addr);
if (system_state == SYSTEM_POWER_OFF) {
- if (tp->saved_wolopts) {
- rtl_wol_suspend_quirk(tp);
+ if (tp->saved_wolopts)
rtl_wol_shutdown_quirk(tp);
- }
- pci_wake_from_d3(pdev, true);
+ pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
}
@@ -4907,7 +4867,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(tp->dev);
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
rtl8168_driver_stop(tp);
rtl_release_firmware(tp);
@@ -4975,16 +4935,12 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
{
/* Get MAC address */
if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
- u32 value = rtl_eri_read(tp, 0xe0);
-
- mac_addr[0] = (value >> 0) & 0xff;
- mac_addr[1] = (value >> 8) & 0xff;
- mac_addr[2] = (value >> 16) & 0xff;
- mac_addr[3] = (value >> 24) & 0xff;
+ u32 value;
+ value = rtl_eri_read(tp, 0xe0);
+ put_unaligned_le32(value, mac_addr);
value = rtl_eri_read(tp, 0xe4);
- mac_addr[4] = (value >> 0) & 0xff;
- mac_addr[5] = (value >> 8) & 0xff;
+ put_unaligned_le16(value, mac_addr + 4);
} else if (rtl_is_8125(tp)) {
rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
}
@@ -5265,12 +5221,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
if (chipset == RTL_GIGA_MAC_NONE) {
- dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
+ dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid);
return -ENODEV;
}
tp->mac_version = chipset;
+ tp->dash_type = rtl_check_dash(tp);
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
@@ -5340,6 +5298,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
+ rtl_set_d3_pll_down(tp, true);
+
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
dev->max_mtu = jumbo_max;
@@ -5360,9 +5320,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* chip gets powered up in rtl_open() */
- rtl_pll_power_down(tp);
-
rc = register_netdev(dev);
if (rc)
return rc;
@@ -5376,7 +5333,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
"ok" : "ko");
- if (r8168_check_dash(tp)) {
+ if (tp->dash_type != RTL_DASH_NONE) {
netdev_info(dev, "DASH enabled\n");
rtl8168_driver_start(tp);
}
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 7453b17a37a2..cb47e68c1a3e 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -165,7 +165,7 @@ enum ravb_reg {
GTO2 = 0x03A8,
GIC = 0x03AC,
GIS = 0x03B0,
- GCPT = 0x03B4, /* Undocumented? */
+ GCPT = 0x03B4, /* Documented for R-Car Gen3 only */
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
@@ -225,7 +225,7 @@ enum CSR_BIT {
CSR_OPS_RESET = 0x00000001,
CSR_OPS_CONFIG = 0x00000002,
CSR_OPS_OPERATION = 0x00000004,
- CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_OPS_STANDBY = 0x00000008, /* Documented for R-Car Gen3 only */
CSR_DTS = 0x00000100,
CSR_TPO0 = 0x00010000,
CSR_TPO1 = 0x00020000,
@@ -241,13 +241,12 @@ enum ESR_BIT {
ESR_EIL = 0x00001000,
};
-/* APSR */
+/* APSR (R-Car Gen3 only) */
enum APSR_BIT {
- APSR_MEMS = 0x00000002,
- APSR_CMSW = 0x00000010,
- APSR_DM = 0x00006000, /* Undocumented? */
- APSR_DM_RDM = 0x00002000,
- APSR_DM_TDM = 0x00004000,
+ APSR_MEMS = 0x00000002, /* Undocumented */
+ APSR_CMSW = 0x00000010,
+ APSR_RDM = 0x00002000,
+ APSR_TDM = 0x00004000,
};
/* RCR */
@@ -530,16 +529,16 @@ enum RIS2_BIT {
/* TIC */
enum TIC_BIT {
- TIC_FTE0 = 0x00000001, /* Undocumented? */
- TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_FTE0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIC_FTE1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIC_TFUE = 0x00000100,
TIC_TFWE = 0x00000200,
};
/* TIS */
enum TIS_BIT {
- TIS_FTF0 = 0x00000001, /* Undocumented? */
- TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_FTF0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIS_FTF1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
@@ -547,8 +546,8 @@ enum TIS_BIT {
/* ISS */
enum ISS_BIT {
- ISS_FRS = 0x00000001, /* Undocumented? */
- ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_FRS = 0x00000001, /* Documented for R-Car Gen3 only */
+ ISS_FTS = 0x00000004, /* Documented for R-Car Gen3 only */
ISS_ES = 0x00000040,
ISS_MS = 0x00000080,
ISS_TFUS = 0x00000100,
@@ -608,13 +607,13 @@ enum GTI_BIT {
/* GIC */
enum GIC_BIT {
- GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTCE = 0x00000001, /* Documented for R-Car Gen3 only */
GIC_PTME = 0x00000004,
};
/* GIS */
enum GIS_BIT {
- GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTCF = 0x00000001, /* Documented for R-Car Gen3 only */
GIS_PTMF = 0x00000004,
GIS_RESERVED = GENMASK(15, 10),
};
@@ -808,10 +807,10 @@ enum ECMR_BIT {
ECMR_TE = 0x00000020,
ECMR_RE = 0x00000040,
ECMR_MPDE = 0x00000200,
- ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
@@ -830,7 +829,7 @@ enum ECSR_BIT {
enum ECSIPR_BIT {
ECSIPR_ICDIP = 0x00000001,
ECSIPR_MPDIP = 0x00000002,
- ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+ ECSIPR_LCHNGIP = 0x00000004,
};
/* PIR */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index bd30505fbc57..eb0c03bdb12d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2034,10 +2034,10 @@ static void ravb_set_delay_mode(struct net_device *ndev)
u32 set = 0;
if (priv->rxcidm)
- set |= APSR_DM_RDM;
+ set |= APSR_RDM;
if (priv->txcidm)
- set |= APSR_DM_TDM;
- ravb_modify(ndev, APSR, APSR_DM, set);
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
static int ravb_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c63304632935..590b088bc4c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- pm_runtime_put_sync(&mdp->pdev->dev);
-
mdp->is_opened = 0;
+ pm_runtime_put(&mdp->pdev->dev);
+
return 0;
}
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
return 0;
}
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read(bus, phy, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
if (!mdp->mii_bus)
return -ENOMEM;
+ /* Wrap accessors with Runtime PM-aware ops */
+ mdp->mii_bus->read = sh_mdiobb_read;
+ mdp->mii_bus->write = sh_mdiobb_write;
+
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = dev;
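The sh_mdiobb_* wrappers above bracket the bitbang accessors with runtime-PM calls so the MAC is guaranteed to be powered while the MDIO bus is in use. The same pattern generalizes to any mii_bus accessor; a hypothetical sketch where op stands in for mdiobb_read and friends:

    #include <linux/phy.h>
    #include <linux/pm_runtime.h>

    static int example_pm_mdio_op(struct mii_bus *bus,
                                  int (*op)(struct mii_bus *, int, int),
                                  int phy, int reg)
    {
            int res;

            pm_runtime_get_sync(bus->parent);   /* resume the MAC */
            res = op(bus, phy, reg);            /* the real bus access */
            pm_runtime_put(bus->parent);        /* drop the PM reference */

            return res;
    }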
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 6fad25321dc5..315a6e5c0f59 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -103,15 +103,13 @@ struct rocker_world_ops {
int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
u8 state);
int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans);
+ unsigned long brport_flags);
int (*port_attr_bridge_flags_support_get)(const struct rocker_port *
rocker_port,
unsigned long *
p_brport_flags);
int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans);
+ u32 ageing_time);
int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan);
int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index dd0bc7f0aaee..740a715c49c6 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1550,17 +1550,13 @@ static void rocker_world_port_stop(struct rocker_port *rocker_port)
}
static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans)
+ u8 state)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_stp_state_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_attr_stp_state_set(rocker_port, state);
}
@@ -1580,8 +1576,7 @@ rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
unsigned long brport_flags_s;
@@ -1603,52 +1598,37 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
- trans);
+ return wops->port_attr_bridge_flags_set(rocker_port, brport_flags);
}
static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
-
+ u32 ageing_time)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_ageing_time_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
- trans);
+ return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time);
}
static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_obj_vlan_add(rocker_port, vlan);
}
@@ -2066,8 +2046,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
********************/
static int rocker_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2075,23 +2054,19 @@ static int rocker_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = rocker_world_port_attr_stp_state_set(rocker_port,
- attr->u.stp_state,
- trans);
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
- attr->u.ageing_time,
- trans);
+ attr->u.ageing_time);
break;
default:
err = -EOPNOTSUPP;
@@ -2102,8 +2077,7 @@ static int rocker_port_attr_set(struct net_device *dev,
}
static int rocker_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ const struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2111,8 +2085,7 @@ static int rocker_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = rocker_world_port_obj_vlan_add(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -2725,8 +2698,7 @@ rocker_switchdev_port_attr_set_event(struct net_device *netdev,
{
int err;
- err = rocker_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = rocker_port_attr_set(netdev, port_attr_info->attr);
port_attr_info->handled = true;
return notifier_from_errno(err);
@@ -2847,8 +2819,7 @@ rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = rocker_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = rocker_port_obj_add(netdev, port_obj_info->obj);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = rocker_port_obj_del(netdev, port_obj_info->obj);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7072b249c8bd..967a634ee9ac 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -923,7 +923,7 @@ static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
struct ofdpa_flow_tbl_entry *entry;
u32 priority;
bool vlan_bridging = !!vlan_id;
- bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
+ bool dflt = !eth_dst || eth_dst_mask;
bool wild = false;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
@@ -2488,8 +2488,7 @@ static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
}
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
unsigned long orig_flags;
@@ -2497,14 +2496,11 @@ static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
orig_flags = ofdpa_port->brport_flags;
ofdpa_port->brport_flags = brport_flags;
- if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
- !switchdev_trans_ph_prepare(trans))
+
+ if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
err = rocker_port_set_learning(ofdpa_port->rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
- if (switchdev_trans_ph_prepare(trans))
- ofdpa_port->brport_flags = orig_flags;
-
return err;
}
@@ -2520,18 +2516,15 @@ ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
+ u32 ageing_time)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
- if (!switchdev_trans_ph_prepare(trans)) {
- ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
- if (ofdpa_port->ageing_time < ofdpa->ageing_time)
- ofdpa->ageing_time = ofdpa_port->ageing_time;
- mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
- }
+ ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ if (ofdpa_port->ageing_time < ofdpa->ageing_time)
+ ofdpa->ageing_time = ofdpa_port->ageing_time;
+ mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
return 0;
}
@@ -2540,32 +2533,16 @@ static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
}
static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
}
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 718308076341..36c8625a6fd7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -612,8 +612,6 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_xdp_xmit = efx_xdp_xmit,
.ndo_bpf = efx_xdp
};
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a4a626e9cd9a..1bfeee283ea9 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -17,6 +17,7 @@
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
+#include "workarounds.h"
/* This is the first interrupt mode to try out of:
* 0 => MSI-X
@@ -137,6 +138,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
{
unsigned int n_channels = parallelism;
int vec_count;
+ int tx_per_ev;
int n_xdp_tx;
int n_xdp_ev;
@@ -149,9 +151,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* multiple tx queues, assuming tx and ev queues are both
* maximum size.
*/
-
+ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
vec_count = pci_msix_vec_count(efx->pci_dev);
if (vec_count < 0)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index aaa112877561..89c5c75f479f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -293,14 +293,10 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
efx->rx_prefix_size);
- xdp.data = *ehp;
- xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
-
+ xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
/* No support yet for XDP metadata */
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + rx_buf->len;
- xdp.rxq = &rx_queue->xdp_rxq_info;
- xdp.frame_sz = efx->rx_page_buf_step;
+ xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+ rx_buf->len, false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
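The sfc conversion above replaces open-coded xdp_buff setup with the xdp_init_buff()/xdp_prepare_buff() pair: the first binds the per-ring invariants (frame size, rxq info), the second fills in the per-packet pointers. A hypothetical receive-path fragment showing the intended split:

    #include <linux/filter.h>
    #include <net/xdp.h>

    static u32 example_run_xdp(const struct bpf_prog *prog,
                               struct xdp_rxq_info *rxq,
                               unsigned char *hard_start,
                               unsigned int headroom, unsigned int len,
                               unsigned int frame_sz)
    {
            struct xdp_buff xdp;

            /* per ring: frame size and rxq binding */
            xdp_init_buff(&xdp, frame_sz, rxq);
            /* per packet: data pointers; false = no XDP metadata */
            xdp_prepare_buff(&xdp, hard_start, headroom, len, false);

            return bpf_prog_run_xdp(prog, &xdp);
    }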
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 742a1f7a838c..891b49281bc6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2191,7 +2191,7 @@ static const struct of_device_id smc91x_match[] = {
MODULE_DEVICE_TABLE(of, smc91x_match);
/**
- * of_try_set_control_gpio - configure a gpio if it exists
+ * try_toggle_control_gpio - configure a gpio if it exists
* @dev: net device
* @desc: where to store the GPIO descriptor, if it exists
* @name: name of the GPIO in DT
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 823d9a7184fe..606c79de93a6 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -557,6 +557,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -582,6 +583,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -594,6 +596,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -623,6 +626,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -1589,6 +1593,8 @@ static int smsc911x_open(struct net_device *dev)
int retval;
int irq_flags;
+ pm_runtime_get_sync(dev->dev.parent);
+
/* find and start the given phy */
if (!dev->phydev) {
retval = smsc911x_mii_probe(dev);
@@ -1735,6 +1741,7 @@ mii_free_out:
phy_disconnect(dev->phydev);
dev->phydev = NULL;
out:
+ pm_runtime_put(dev->dev.parent);
return retval;
}
@@ -1766,6 +1773,7 @@ static int smsc911x_stop(struct net_device *dev)
dev->phydev = NULL;
}
netif_carrier_off(dev);
+ pm_runtime_put(dev->dev.parent);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -2334,7 +2342,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -2540,6 +2547,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
}
spin_unlock_irq(&pdata->mac_lock);
+ pm_runtime_put(&pdev->dev);
netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 19d20a6d0d44..3c53051bdacf 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -956,8 +956,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u32 xdp_act = 0;
int done = 0;
- xdp.rxq = &dring->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE;
+ xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
@@ -1016,10 +1015,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
dma_dir);
prefetch(desc->addr);
- xdp.data_hard_start = desc->addr;
- xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + pkt_len;
+ xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
+ pkt_len, false);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 82b1c7a5a7a9..ba0e4d2b256a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
@@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a2e80c89de2d..1c9c67b641a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
@@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
@@ -457,6 +459,21 @@ static struct stmmac_pci_info tgl_sgmii1g_info = {
.setup = tgl_sgmii_data,
};
+static int adls_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
+ /* SerDes power up and power down are done in BIOS for ADL */
+
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adls_sgmii1g_info = {
+ .setup = adls_sgmii_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -721,7 +738,11 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID 0x7aac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID 0x7aad
static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
@@ -735,6 +756,10 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
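Each PCI_DEVICE_DATA() entry above stashes a pointer to the matching stmmac_pci_info in id->driver_data, which probe then uses to run the board-specific setup hook. A hypothetical probe fragment (the stmmac types come from the driver's local headers):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
    {
            const struct stmmac_pci_info *info =
                    (const struct stmmac_pci_info *)id->driver_data;
            struct plat_stmmacenet_data *plat;

            plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
            if (!plat)
                    return -ENOMEM;

            return info->setup(pdev, plat); /* e.g. adls_sgmii_data() */
    }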
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 459ae715b33d..848e5c37746b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -68,10 +68,21 @@
*/
#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+#define PRG_ETH1 0x4
+
+/* Defines a delay added to the input RX_CLK for better timing.
+ * Each step is 200ps. These bits are used with external RGMII PHYs
+ * because the RGMII RX window is small. cfg_rxclk_dly can adjust the
+ * window between RX_CLK and RX_DATA and improve the stability of
+ * "rx data valid".
+ */
+#define PRG_ETH1_CFG_RXCLK_DLY GENMASK(19, 16)
+
struct meson8b_dwmac;
struct meson8b_dwmac_data {
int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
+ bool has_prg_eth1_rgmii_rx_delay;
};
struct meson8b_dwmac {
@@ -82,7 +93,7 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
- u32 rx_delay_ns;
+ u32 rx_delay_ps;
struct clk *timing_adj_clk;
};
@@ -135,7 +146,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
static const struct clk_parent_data mux_parents[] = {
{ .fw_name = "clkin0", },
- { .fw_name = "clkin1", },
+ { .index = -1, },
};
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },
@@ -268,32 +279,37 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
return 0;
}
-static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
{
- u32 tx_dly_config, rx_dly_config, delay_config;
+ u32 tx_dly_config, rx_adj_config, cfg_rxclk_dly, delay_config;
int ret;
+ rx_adj_config = 0;
+ cfg_rxclk_dly = 0;
tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
dwmac->tx_delay_ns >> 1);
- if (dwmac->rx_delay_ns == 2)
- rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
- else
- rx_dly_config = 0;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay)
+ cfg_rxclk_dly = FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY,
+ dwmac->rx_delay_ps / 200);
+ else if (dwmac->rx_delay_ps == 2000)
+ rx_adj_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
- delay_config = tx_dly_config | rx_dly_config;
+ delay_config = tx_dly_config | rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
delay_config = tx_dly_config;
+ cfg_rxclk_dly = 0;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- delay_config = rx_dly_config;
+ delay_config = rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RMII:
delay_config = 0;
+ cfg_rxclk_dly = 0;
break;
default:
dev_err(dwmac->dev, "unsupported phy-mode %s\n",
@@ -301,7 +317,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (delay_config & PRG_ETH0_ADJ_ENABLE) {
if (!dwmac->timing_adj_clk) {
dev_err(dwmac->dev,
"The timing-adjustment clock is mandatory for the RX delay re-timing\n");
@@ -323,6 +339,16 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
delay_config);
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH1, PRG_ETH1_CFG_RXCLK_DLY,
+ cfg_rxclk_dly);
+
+ return 0;
+}
+
+static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+{
+ int ret;
+
if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
@@ -406,16 +432,30 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- /* use 0ns as fallback since this is what most boards actually use */
- if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
- &dwmac->rx_delay_ns))
- dwmac->rx_delay_ns = 0;
+ /* RX delay defaults to 0ps since this is what many boards use */
+ if (of_property_read_u32(pdev->dev.of_node, "rx-internal-delay-ps",
+ &dwmac->rx_delay_ps)) {
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ps))
+ /* convert ns to ps */
+ dwmac->rx_delay_ps *= 1000;
+ }
- if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
- dev_err(&pdev->dev,
- "The only allowed RX delays values are: 0ns, 2ns");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay) {
+ if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) {
+ dev_err(dwmac->dev,
+ "The RGMII RX delay range is 0..3000ps in 200ps steps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ } else {
+ if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) {
+ dev_err(dwmac->dev,
+ "The only allowed RGMII RX delays values are: 0ps, 2000ps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
}
dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
@@ -425,6 +465,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ ret = meson8b_init_rgmii_delays(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -453,10 +497,17 @@ err_remove_config_dt:
static const struct meson8b_dwmac_data meson8b_dwmac_data = {
.set_phy_mode = meson8b_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
};
static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
.set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
+};
+
+static const struct meson8b_dwmac_data meson_g12a_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = true,
};
static const struct of_device_id meson8b_dwmac_match[] = {
@@ -478,7 +529,7 @@ static const struct of_device_id meson8b_dwmac_match[] = {
},
{
.compatible = "amlogic,meson-g12a-dwmac",
- .data = &meson_axg_dwmac_data,
+ .data = &meson_g12a_dwmac_data,
},
{ }
};
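The G12A variant validates the new rx-internal-delay-ps property as 0..3000ps in 200ps steps and encodes the step count into the 4-bit PRG_ETH1 field with FIELD_PREP(). A hypothetical encoder mirroring that logic (e.g. 1200ps becomes step value 6):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define PRG_ETH1_CFG_RXCLK_DLY  GENMASK(19, 16)

    static int example_encode_rx_delay_ps(u32 delay_ps, u32 *field)
    {
            if (delay_ps > 3000 || delay_ps % 200)
                    return -EINVAL; /* 200ps steps, max 3000ps */

            *field = FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY, delay_ps / 200);
            return 0;
    }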
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 58e0511badba..a5e0eff4a387 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -64,6 +64,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
* @internal_phy_powered: Is the internal PHY powered
+ * @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
+ bool use_internal_phy;
void *mux_handle;
};
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- if (gmac->regulator)
- regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
- return ret;
+ goto err_disable_regulator;
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_clk;
}
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
- bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
- need_power_ephy = true;
+ gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
- need_power_ephy = false;
+ gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
- if (need_power_ephy) {
+ if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
return ret;
}
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
- struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *node = priv->device->of_node;
+ struct sunxi_priv_data *gmac = plat->bsp_priv;
+ struct device_node *node = dev->of_node;
int ret;
u32 reg, val;
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
- dev_err(priv->device, "Fail to read from regmap field.\n");
+ dev_err(dev, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
- dev_warn(priv->device,
+ dev_warn(dev,
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
- ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+ ret = of_mdio_parse_addr(dev, plat->phy_node);
if (ret < 0) {
- dev_err(priv->device, "Could not parse MDIO addr\n");
+ dev_err(dev, "Could not parse MDIO addr\n");
return ret;
}
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ dev_err(dev, "tx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
reg &= ~(gmac->variant->tx_delay_max <<
SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ dev_err(dev, "Invalid TX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ dev_err(dev, "rx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
reg &= ~(gmac->variant->rx_delay_max <<
SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ dev_err(dev, "Invalid RX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (priv->plat->interface) {
+ switch (plat->interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
break;
default:
- dev_err(priv->device, "Unsupported interface mode: %s",
- phy_modes(priv->plat->interface));
+ dev_err(dev, "Unsupported interface mode: %s",
+ phy_modes(plat->interface));
return -EINVAL;
}
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
struct sunxi_priv_data *gmac = priv;
if (gmac->variant->soc_has_internal_phy) {
- /* sun8i_dwmac_exit could be called with mdiomux uninit */
- if (gmac->mux_handle)
- mdio_mux_uninit(gmac->mux_handle);
if (gmac->internal_phy_powered)
sun8i_dwmac_unpower_internal_phy(gmac);
}
- sun8i_dwmac_unset_syscon(gmac);
-
- reset_control_put(gmac->rst_ephy);
-
clk_disable_unprepare(gmac->tx_clk);
if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
{
struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- int ret;
mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return NULL;
- ret = sun8i_dwmac_set_syscon(priv);
- if (ret)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
- if (IS_ERR(plat_dat))
- return PTR_ERR(plat_dat);
-
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = of_get_phy_mode(dev->of_node, &interface);
if (ret)
return -EINVAL;
- plat_dat->interface = interface;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
+ plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
+ ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ if (ret)
+ goto dwmac_deconfig;
+
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto dwmac_syscon;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (gmac->variant->soc_has_internal_phy) {
ret = get_ephy_nodes(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
ret = sun8i_dwmac_register_mdio_mux(priv);
if (ret) {
dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} else {
ret = sun8i_dwmac_reset(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
}
return ret;
dwmac_mux:
- sun8i_dwmac_unset_syscon(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+dwmac_remove:
+ stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
+ sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
stmmac_pltfr_remove(pdev);
- return ret;
+ sun8i_dwmac_unset_syscon(gmac);
+
+ return 0;
}
static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sun8i_dwmac_remove,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
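
The reworked error path above unwinds in strict reverse order through the new label ladder (dwmac_mux -> dwmac_remove -> dwmac_exit -> dwmac_syscon -> dwmac_deconfig). A minimal, compilable userspace sketch of that goto-ladder pattern; the step_*() and undo_*() helpers are hypothetical stand-ins for the driver's setup and teardown calls:

#include <stdio.h>

/* Goto-ladder unwind as in the new sun8i_dwmac_probe() error path: a
 * failure at step N jumps to the label that undoes steps N-1..1 in
 * reverse order. All helpers here are hypothetical stand-ins.
 */
static int step_config(void)  { return 0; }
static int step_syscon(void)  { return 0; }
static int step_init(void)    { return -1; } /* pretend this step fails */
static void undo_syscon(void) { puts("unset syscon"); }
static void undo_config(void) { puts("remove config"); }

static int probe(void)
{
	int ret;

	ret = step_config();
	if (ret)
		return ret;

	ret = step_syscon();
	if (ret)
		goto err_config;

	ret = step_init();
	if (ret)
		goto err_syscon;

	return 0;

err_syscon:
	undo_syscon();
err_config:
	undo_config();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
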
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 03e79a677c8b..8f7ac24545ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -568,68 +568,24 @@ static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
unsigned int ptp_rate)
{
- u32 speed, total_offset, offset, ctrl, ctr_low;
- u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
- u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
int i, ret = 0x0;
- u64 total_ctr;
-
- if (extcfg & GMAC_CONFIG_EIPG_EN) {
- offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
- offset = 104 + (offset * 8);
- } else {
- offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
- offset = 96 - (offset * 8);
- }
-
- speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
- speed = speed >> GMAC_CONFIG_FES_SHIFT;
-
- switch (speed) {
- case 0x0:
- offset = offset * 1000; /* 1G */
- break;
- case 0x1:
- offset = offset * 400; /* 2.5G */
- break;
- case 0x2:
- offset = offset * 100000; /* 10M */
- break;
- case 0x3:
- offset = offset * 10000; /* 100M */
- break;
- default:
- return -EINVAL;
- }
-
- offset = offset / 1000;
+ u32 ctrl;
ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
if (ret)
return ret;
- total_offset = 0;
for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
if (ret)
return ret;
-
- total_offset += offset;
}
- total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
- total_ctr += total_offset;
-
- ctr_low = do_div(total_ctr, 1000000000);
-
- ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
- ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
- if (ret)
- return ret;
-
ctrl = readl(ioaddr + MTL_EST_CONTROL);
ctrl &= ~PTOV;
ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
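
The PTOV field written just above encodes the PTP time offset as six PTP clock periods expressed in nanoseconds. A standalone sketch of that arithmetic; the 250 MHz clock rate is an illustrative assumption, not a value from the patch:

#include <stdio.h>

/* PTOV as programmed above: six PTP clock periods, in nanoseconds.
 * The ptp_rate value is illustrative.
 */
int main(void)
{
	unsigned int ptp_rate = 250000000;                 /* 250 MHz PTP clock */
	unsigned int ptov = (1000000000u / ptp_rate) * 6;  /* 4 ns/tick * 6 */

	printf("PTOV = %u ns\n", ptov);
	return 0;
}
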
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5b1c12ff98c0..26b971cd4da5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2184,7 +2184,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->rx_napi);
+ __napi_schedule(&ch->rx_napi);
}
}
@@ -2193,7 +2193,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->tx_napi);
+ __napi_schedule(&ch->tx_napi);
}
}
@@ -4026,6 +4026,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ const int mtu = new_mtu;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
@@ -4043,7 +4044,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = new_mtu;
+ dev->mtu = mtu;
netdev_update_features(dev);
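
The cached const int mtu matters because the function adjusts new_mtu for its internal FIFO checks (in code outside the hunk shown), and dev->mtu must keep the value the caller actually requested. A hedged userspace sketch of the hazard; the 64-byte alignment here is illustrative, not the driver's actual rounding:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Sketch of why stmmac_change_mtu() caches new_mtu: the driver rounds
 * the value up for an internal check, and dev->mtu must store the
 * pre-rounding value the user asked for.
 */
int main(void)
{
	int new_mtu = 1500;
	const int mtu = new_mtu;          /* cache the requested value */

	new_mtu = ALIGN_UP(new_mtu, 64);  /* driver-internal rounding: 1536 */
	printf("validated against %d, stored %d\n", new_mtu, mtu);
	return 0;
}
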
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index f5bed4d26e80..8ed3b2c834a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -599,7 +599,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time;
+ struct timespec64 time, current_time;
+ ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -694,7 +695,22 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
}
/* Adjust for real system time */
- time = ktime_to_timespec64(qopt->base_time);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ if (ktime_after(qopt->base_time, current_time_ns)) {
+ time = ktime_to_timespec64(qopt->base_time);
+ } else {
+ ktime_t base_time;
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
+ qopt->cycle_time);
+ base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
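
The logic above keeps the EST base time in the future: when qopt->base_time has already passed, it is advanced by one more than the number of whole cycles elapsed, so the first gate event lands after the current PTP time. A standalone sketch of the arithmetic with made-up nanosecond values:

#include <stdint.h>
#include <stdio.h>

/* taprio base-time roll-forward, as in the hunk above:
 *   n = floor((now - base_time) / cycle_time)
 *   base_time' = base_time + (n + 1) * cycle_time
 * All values are nanoseconds; the numbers are illustrative.
 */
int main(void)
{
	int64_t base_time  = 1000;  /* requested start, already in the past */
	int64_t now        = 5300;  /* current PTP time */
	int64_t cycle_time = 1000;  /* schedule period */

	if (now > base_time) {
		int64_t n = (now - base_time) / cycle_time;
		base_time += (n + 1) * cycle_time;
	}
	printf("effective base time: %lld\n", (long long)base_time); /* 6000 */
	return 0;
}
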
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 766e8866bbef..1850743c04da 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -366,8 +366,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_err(dev, "Failed to map rx skb buffer\n");
return -EINVAL;
@@ -375,6 +376,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = skb;
@@ -691,8 +693,9 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_kfree_skb_any(skb);
@@ -779,6 +782,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
@@ -793,7 +797,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
- dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
@@ -864,7 +868,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
- struct device *dev,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
@@ -875,20 +878,23 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
next_desc = first_desc;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_single(dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
while (next_desc_dma) {
next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
next_desc_dma);
cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_page(dev, buf_dma, buf_dma_len,
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
@@ -906,7 +912,7 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
dev_kfree_skb_any(skb);
}
@@ -926,7 +932,7 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
ndev = skb->dev;
@@ -1119,9 +1125,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
netif_txq = netdev_get_tx_queue(ndev, q_idx);
/* Map the linear buffer */
- buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb buffer\n");
ndev->stats.tx_errors++;
goto err_free_skb;
@@ -1130,7 +1136,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (!first_desc) {
dev_dbg(dev, "Failed to allocate descriptor\n");
- dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
+ DMA_TO_DEVICE);
goto busy_stop_q;
}
@@ -1140,6 +1147,7 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
cppi5_hdesc_set_pkttype(first_desc, 0x7);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
*(swdata) = skb;
@@ -1175,9 +1183,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
- buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb page\n");
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
ndev->stats.tx_errors++;
@@ -1185,11 +1193,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
}
cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(next_desc,
buf_dma, frag_size, buf_dma, frag_size);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
pkt_len += frag_size;
@@ -1237,14 +1247,14 @@ done_tx:
return NETDEV_TX_OK;
err_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
busy_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
netif_tx_stop_queue(netif_txq);
return NETDEV_TX_BUSY;
@@ -1545,16 +1555,6 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
tx_chn->common = common;
tx_chn->id = i;
tx_chn->descs_num = max_desc_num;
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->tx_chn_name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- dev_err(dev, "Failed to create poll %d\n", ret);
- goto err;
- }
tx_chn->tx_chn =
k3_udma_glue_request_tx_chn(dev,
@@ -1565,6 +1565,17 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
"Failed to request tx dma channel\n");
goto err;
}
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+
+ tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create poll %d\n", ret);
+ goto err;
+ }
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq <= 0) {
@@ -1622,14 +1633,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
/* init all flows */
rx_chn->dev = dev;
rx_chn->descs_num = max_desc_num;
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
- rx_chn->descs_num,
- hdesc_size, "rx");
- if (IS_ERR(rx_chn->desc_pool)) {
- ret = PTR_ERR(rx_chn->desc_pool);
- dev_err(dev, "Failed to create rx poll %d\n", ret);
- goto err;
- }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -1637,6 +1640,16 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
"Failed to request rx dma channel\n");
goto err;
}
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ dev_err(dev, "Failed to create rx poll %d\n", ret);
+ goto err;
+ }
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
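
The recurring theme in this am65-cpsw change is that buffers and descriptor pools must be mapped against the device that actually masters the DMA, as returned by k3_udma_glue_rx_get_dma_device()/k3_udma_glue_tx_get_dma_device(), rather than the Ethernet platform device, so DMA-API and IOMMU bookkeeping applies to the right device. A kernel-context sketch of that rule; map_rx_buf() is a hypothetical helper, not part of the driver:

#include <linux/dma-mapping.h>

/* Hypothetical helper: dma_dev is the device returned by
 * k3_udma_glue_rx_get_dma_device(), not the net device's platform
 * device. Callers must check the result with dma_mapping_error().
 */
static dma_addr_t map_rx_buf(struct device *dma_dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, handle))
		dev_err(dma_dev, "rx map failed\n");
	return handle;
}
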
@@ -2102,9 +2115,16 @@ static const struct am65_cpsw_pdata j721e_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
+static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
+ { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2164,12 +2184,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->tx_ch_num = 1;
common->pf_p0_rx_ptype_rrobin = false;
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "error setting dma mask: %d\n", ret);
- return ret;
- }
-
common->ports = devm_kcalloc(dev, common->port_num,
sizeof(*common->ports),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 02aed4c0ceba..d7f8a0f76fdc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -56,6 +56,7 @@ struct am65_cpsw_host {
};
struct am65_cpsw_tx_chn {
+ struct device *dma_dev;
struct napi_struct napi_tx;
struct am65_cpsw_common *common;
struct k3_cppi_desc_pool *desc_pool;
@@ -69,6 +70,7 @@ struct am65_cpsw_tx_chn {
struct am65_cpsw_rx_chn {
struct device *dev;
+ struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
u32 descs_num;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 3bdd4dbcd2ff..ebcc6386cc34 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -356,7 +356,7 @@ static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
-/**
+/*
* Enable ESTf periodic output, set cycle start time and interval.
*/
static int am65_cpsw_timer_set(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 5dc60ecabe56..9caaae79fc95 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -727,7 +727,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
/**
* am65_cpts_rx_enable - enable rx timestamping
* @cpts: cpts handle
- * @skb: packet
+ * @en: enable
*
* This function enables rx packet timestamping. The CPTS can timestamp all
* rx packets.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b0f00b4edd94..5239318e9686 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -392,21 +392,15 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
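
This conversion (repeated for cpsw_new.c below) replaces open-coded xdp_buff field assignments with the xdp_init_buff()/xdp_prepare_buff() helpers. A self-contained userspace approximation of what those helpers do; the struct mirrors xdp_buff's fields, but this is a sketch, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

/* Approximation of the two helpers used above. */
struct xdp_buff_sketch {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	unsigned int frame_sz;
	void *rxq;
};

static void xdp_init_buff_sketch(struct xdp_buff_sketch *xdp,
				 unsigned int frame_sz, void *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
}

static void xdp_prepare_buff_sketch(struct xdp_buff_sketch *xdp,
				    unsigned char *hard_start,
				    int headroom, int data_len, bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	/* data + 1 is the sentinel for "metadata invalid", as in the kernel */
	xdp->data_meta = meta_valid ? data : data + 1;
}

int main(void)
{
	static unsigned char page[4096];
	struct xdp_buff_sketch xdp;

	xdp_init_buff_sketch(&xdp, sizeof(page), NULL);
	xdp_prepare_buff_sketch(&xdp, page, 256, 1500, false);
	printf("payload at offset %td\n", (char *)xdp.data - (char *)page);
	return 0;
}
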
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index cdc308a2aa3e..d828f856237a 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1256,6 +1256,13 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.major_ver_mask = 0x7,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
+ {
+ .dev_id = "am64-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ .tbl_entries = 512,
+ },
{ },
};
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 2f5e0ad23ad7..94747f82c60b 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -335,21 +335,15 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
if (ret != CPSW_XDP_PASS)
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 29747da5c514..9967cf985728 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -24,16 +24,12 @@ struct cpsw_switchdev_event_work {
unsigned long event;
};
-static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans, u8 state)
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv, u8 state)
{
struct cpsw_common *cpsw = priv->cpsw;
u8 cpsw_state;
int ret = 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
switch (state) {
case BR_STATE_FORWARDING:
cpsw_state = ALE_PORT_STATE_FORWARD;
@@ -60,16 +56,12 @@ static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
}
static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
unsigned long brport_flags)
{
struct cpsw_common *cpsw = priv->cpsw;
bool unreg_mcast_add = false;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (brport_flags & BR_MCAST_FLOOD)
unreg_mcast_add = true;
dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
@@ -82,7 +74,6 @@ static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
}
static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
unsigned long flags)
{
if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
@@ -92,8 +83,7 @@ static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
}
static int cpsw_port_attr_set(struct net_device *ndev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
@@ -102,15 +92,15 @@ static int cpsw_port_attr_set(struct net_device *ndev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ ret = cpsw_port_attr_br_flags_pre_set(ndev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state);
+ ret = cpsw_port_stp_state_set(priv, attr->u.stp_state);
dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev,
+ ret = cpsw_port_attr_br_flags_set(priv, attr->orig_dev,
attr->u.brport_flags);
break;
default:
@@ -253,56 +243,24 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
}
static int cpsw_port_vlans_add(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
- priv->ndev->name, vlan->vid_begin, vlan->flags);
+ priv->ndev->name, vlan->vid, vlan->flags);
if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
return 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int cpsw_port_vlans_del(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan)
-
-{
- struct net_device *orig_dev = vlan->obj.orig_dev;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_del(priv, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
+ return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
static int cpsw_port_mdb_add(struct cpsw_priv *priv,
- struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ struct switchdev_obj_port_mdb *mdb)
{
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -311,9 +269,6 @@ static int cpsw_port_mdb_add(struct cpsw_priv *priv,
int port_mask;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (cpu_port)
port_mask = BIT(HOST_PORT_NUM);
else
@@ -352,7 +307,6 @@ static int cpsw_port_mdb_del(struct cpsw_priv *priv,
static int cpsw_port_obj_add(struct net_device *ndev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
@@ -365,11 +319,11 @@ static int cpsw_port_obj_add(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_add(priv, vlan, trans);
+ err = cpsw_port_vlans_add(priv, vlan);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
- err = cpsw_port_mdb_add(priv, mdb, trans);
+ err = cpsw_port_mdb_add(priv, mdb);
break;
default:
err = -EOPNOTSUPP;
@@ -392,7 +346,7 @@ static int cpsw_port_obj_del(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_del(priv, vlan);
+ err = cpsw_port_vlan_del(priv, vlan->vid, vlan->obj.orig_dev);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index d1fc7955d422..43222a34cba0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
+ cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ cpts->phc_index = -1;
if (n_ext_ts)
cpts->info.n_ext_ts = n_ext_ts;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 3d1fc8d2ca66..55e652624bd7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1100,7 +1100,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
return packets_done;
}
-/**
+/*
* gelic_card_interrupt - event handler for gelic_net
*/
static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
@@ -1400,6 +1400,7 @@ out:
/**
* gelic_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
* called if tx hangs; schedules a task that resets the interface
*/
@@ -1431,6 +1432,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
/**
* gelic_ether_setup_netdev_ops - initialization of net_device operations
* @netdev: net_device structure
+ * @napi: napi structure
*
* fills out function pointers in the net_device structure
*/
@@ -1632,7 +1634,7 @@ static void gelic_card_get_vlan_info(struct gelic_card *card)
dev_info(ctodev(card), "internal vlan %s\n",
card->vlan_required? "enabled" : "disabled");
}
-/**
+/*
* ps3_gelic_driver_probe - add a device to the control of this driver
*/
static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
@@ -1787,7 +1789,7 @@ fail_open:
return result;
}
-/**
+/*
* ps3_gelic_driver_remove - remove a device from the control of this driver
*/
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5f5b33e6653b..d5a75ef7e3ca 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -254,7 +254,7 @@ spider_net_set_promisc(struct spider_net_card *card)
/**
* spider_net_get_descr_status -- returns the status of a descriptor
- * @descr: descriptor to look at
+ * @hwdescr: descriptor to look at
*
* returns the status as in the dmac_cmd_status field of the descriptor
*/
@@ -542,6 +542,7 @@ error:
/**
* spider_net_get_multicast_hash - generates hash for multicast filter table
+ * @netdev: interface device structure
* @addr: multicast address
*
* returns the hash value.
@@ -890,7 +891,7 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/**
* spider_net_cleanup_tx_ring - cleans up the TX ring
- * @card: card structure
+ * @t: timer context used to obtain the pointer to net card data structure
*
* spider_net_cleanup_tx_ring is called by either the tx_timer
* or from the NAPI polling routine.
@@ -1063,6 +1064,7 @@ static void show_rx_chain(struct spider_net_card *card)
/**
* spider_net_resync_head_ptr - Advance head ptr past empty descrs
+ * @card: card structure
*
* If the driver fails to keep up and empty the queue, then the
* hardware will run out of room to put incoming packets. This
@@ -1220,7 +1222,7 @@ bad_desc:
/**
* spider_net_poll - NAPI poll function called by the stack to return packets
- * @netdev: interface device structure
+ * @napi: napi device structure
* @budget: number of packets we can pass to the stack at most
*
* returns 0 if no more packets available to the driver/stack. Returns 1,
@@ -1268,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
/**
* spider_net_set_mac - sets the MAC of an interface
* @netdev: interface device structure
- * @ptr: pointer to new MAC address
+ * @p: pointer to new MAC address
*
* Returns 0 on success, <0 on failure. Currently, we don't support this
* and will always return EOPNOTSUPP.
@@ -1340,6 +1342,8 @@ spider_net_link_reset(struct net_device *netdev)
* spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure
* @status_reg: interrupt status register 0 (GHIINT0STS)
+ * @error_reg1: interrupt status register 1 (GHIINT1STS)
+ * @error_reg2: interrupt status register 2 (GHIINT2STS)
*
* spider_net_handle_error_irq treats or ignores all error conditions
* found when an interrupt is presented
@@ -1961,8 +1965,7 @@ init_firmware_failed:
/**
* spider_net_link_phy
- * @data: used for pointer to card structure
- *
+ * @t: timer context used to obtain the pointer to net card data structure
*/
static void spider_net_link_phy(struct timer_list *t)
{
@@ -2140,7 +2143,7 @@ spider_net_stop(struct net_device *netdev)
/**
* spider_net_tx_timeout_task - task scheduled by the watchdog timeout
* function (to be called not under interrupt status)
- * @data: data, is interface device structure
+ * @work: work context used to obtain the pointer to net card data structure
*
* called as task when tx hangs, resets interface (if interface is up)
*/
@@ -2174,6 +2177,7 @@ out:
/**
* spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
* called if tx hangs; schedules a task that resets the interface
*/
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 2e5202923510..0152f1e70783 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -247,7 +247,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
}
#endif
-static spinlock_t mdio_lock;
+static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
@@ -528,7 +528,6 @@ static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
mdio_regs = regs;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
- spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
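
Declaring mdio_lock with DEFINE_SPINLOCK() makes it valid from link time, which is why the runtime spin_lock_init() call could be dropped above; there is no longer a window where the lock exists but is uninitialized. A userspace analogue using a statically initialized pthread mutex:

#include <pthread.h>

/* Userspace analogue: a statically initialized mutex, like a lock
 * declared with DEFINE_SPINLOCK(), needs no runtime *_init() call.
 */
static pthread_mutex_t mdio_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&mdio_lock);
	pthread_mutex_unlock(&mdio_lock);
	return 0;
}
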
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5523f069b9a5..4ac0373326ef 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1197,11 +1197,12 @@ static void geneve_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &geneve_type);
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
/* MTU range: 68 - (something less than 65535) */
@@ -1851,16 +1852,10 @@ static int geneve_netdevice_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
- event == NETDEV_UDP_TUNNEL_DROP_INFO) {
- geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
- } else if (event == NETDEV_UNREGISTER) {
- if (!dev->udp_tunnel_nic_info)
- geneve_offload_rx_ports(dev, false);
- } else if (event == NETDEV_REGISTER) {
- if (!dev->udp_tunnel_nic_info)
- geneve_offload_rx_ports(dev, true);
- }
+ if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
+ geneve_offload_rx_ports(dev, true);
+ else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
+ geneve_offload_rx_ports(dev, false);
return NOTIFY_DONE;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4c04e271f184..851364314ecc 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -21,6 +21,7 @@
#include <linux/file.h>
#include <linux/gtp.h>
+#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
@@ -73,6 +74,9 @@ struct gtp_dev {
unsigned int hash_size;
struct hlist_head *tid_hash;
struct hlist_head *addr_hash;
+ /* Used by LWT tunnel. */
+ bool collect_md;
+ struct socket *collect_md_sock;
};
static unsigned int gtp_net_id __read_mostly;
@@ -179,33 +183,121 @@ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
return false;
}
-static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
- unsigned int hdrlen, unsigned int role)
+static int gtp_set_tun_dst(struct gtp_dev *gtp, struct sk_buff *skb,
+ unsigned int hdrlen, u8 gtp_version,
+ __be64 tid, u8 flags)
{
- if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
- netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
- return 1;
+ struct metadata_dst *tun_dst;
+ int opts_len = 0;
+
+ if (unlikely(flags & GTP1_F_MASK))
+ opts_len = sizeof(struct gtpu_metadata);
+
+ tun_dst = udp_tun_rx_dst(skb, gtp->sk1u->sk_family, TUNNEL_KEY, tid, opts_len);
+ if (!tun_dst) {
+ netdev_dbg(gtp->dev, "Failed to allocate tun_dst");
+ goto err;
}
+ netdev_dbg(gtp->dev, "attaching metadata_dst to skb, gtp ver %d hdrlen %d\n",
+ gtp_version, hdrlen);
+ if (unlikely(opts_len)) {
+ struct gtpu_metadata *opts;
+ struct gtp1_header *gtp1;
+
+ opts = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+ gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+ opts->ver = GTP_METADATA_V1;
+ opts->flags = gtp1->flags;
+ opts->type = gtp1->type;
+ netdev_dbg(gtp->dev, "recved control pkt: flag %x type: %d\n",
+ opts->flags, opts->type);
+ tun_dst->u.tun_info.key.tun_flags |= TUNNEL_GTPU_OPT;
+ tun_dst->u.tun_info.options_len = opts_len;
+ skb->protocol = htons(0xffff); /* Unknown */
+ }
/* Get rid of the GTP + UDP headers. */
if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
- !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
- return -1;
+ !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)))) {
+ gtp->dev->stats.rx_length_errors++;
+ goto err;
+ }
+
+ skb_dst_set(skb, &tun_dst->dst);
+ return 0;
+err:
+ return -1;
+}
+
+static int gtp_rx(struct gtp_dev *gtp, struct sk_buff *skb,
+ unsigned int hdrlen, u8 gtp_version, unsigned int role,
+ __be64 tid, u8 flags, u8 type)
+{
+ if (ip_tunnel_collect_metadata() || gtp->collect_md) {
+ int err;
+
+ err = gtp_set_tun_dst(gtp, skb, hdrlen, gtp_version, tid, flags);
+ if (err)
+ goto err;
+ } else {
+ struct pdp_ctx *pctx;
+
+ if (flags & GTP1_F_MASK)
+ hdrlen += 4;
- netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
+ if (type != GTP_TPDU)
+ return 1;
+
+ if (gtp_version == GTP_V0)
+ pctx = gtp0_pdp_find(gtp, be64_to_cpu(tid));
+ else
+ pctx = gtp1_pdp_find(gtp, be64_to_cpu(tid));
+ if (!pctx) {
+ netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+ return 1;
+ }
+
+ if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
+ netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
+ return 1;
+ }
+ /* Get rid of the GTP + UDP headers. */
+ if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+ !net_eq(sock_net(pctx->sk), dev_net(gtp->dev)))) {
+ gtp->dev->stats.rx_length_errors++;
+ goto err;
+ }
+ }
+ netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
/* Now that the UDP and the GTP header have been removed, set up the
* new network header. This is required by the upper layer to
* calculate the transport header.
*/
skb_reset_network_header(skb);
+ if (pskb_may_pull(skb, sizeof(struct iphdr))) {
+ struct iphdr *iph;
+
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ netdev_dbg(gtp->dev, "inner pkt: ipv4");
+ skb->protocol = htons(ETH_P_IP);
+ } else if (iph->version == 6) {
+ netdev_dbg(gtp->dev, "inner pkt: ipv6");
+ skb->protocol = htons(ETH_P_IPV6);
+ } else {
+ netdev_dbg(gtp->dev, "inner pkt error: Unknown type");
+ }
+ }
- skb->dev = pctx->dev;
-
- dev_sw_netstats_rx_add(pctx->dev, skb->len);
-
+ skb->dev = gtp->dev;
+ dev_sw_netstats_rx_add(gtp->dev, skb->len);
netif_rx(skb);
return 0;
+
+err:
+ gtp->dev->stats.rx_dropped++;
+ return -1;
}
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
@@ -214,7 +306,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp0_header);
struct gtp0_header *gtp0;
- struct pdp_ctx *pctx;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -224,16 +315,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp0->flags >> 5) != GTP_V0)
return 1;
- if (gtp0->type != GTP_TPDU)
- return 1;
-
- pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
- if (!pctx) {
- netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- return 1;
- }
-
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ return gtp_rx(gtp, skb, hdrlen, GTP_V0, gtp->role, gtp0->tid, gtp0->flags, gtp0->type);
}
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
@@ -241,41 +323,30 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp1_header);
struct gtp1_header *gtp1;
- struct pdp_ctx *pctx;
if (!pskb_may_pull(skb, hdrlen))
return -1;
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+ netdev_dbg(gtp->dev, "GTPv1 recv: flags %x\n", gtp1->flags);
if ((gtp1->flags >> 5) != GTP_V1)
return 1;
- if (gtp1->type != GTP_TPDU)
- return 1;
-
/* From 29.060: "This field shall be present if and only if any one or
* more of the S, PN and E flags are set.".
*
* If any of the bit is set, then the remaining ones also have to be
* set.
*/
- if (gtp1->flags & GTP1_F_MASK)
- hdrlen += 4;
-
/* Make sure the header is large enough, including extensions. */
if (!pskb_may_pull(skb, hdrlen))
return -1;
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
- pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
- if (!pctx) {
- netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- return 1;
- }
-
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ return gtp_rx(gtp, skb, hdrlen, GTP_V1, gtp->role,
+ key32_to_tunnel_id(gtp1->tid), gtp1->flags, gtp1->type);
}
static void __gtp_encap_destroy(struct sock *sk)
@@ -315,6 +386,11 @@ static void gtp_encap_disable(struct gtp_dev *gtp)
{
gtp_encap_disable_sock(gtp->sk0);
gtp_encap_disable_sock(gtp->sk1u);
+ if (gtp->collect_md_sock) {
+ udp_tunnel_sock_release(gtp->collect_md_sock);
+ gtp->collect_md_sock = NULL;
+ netdev_dbg(gtp->dev, "GTP socket released.\n");
+ }
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -329,7 +405,8 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!gtp)
return 1;
- netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+ netdev_dbg(gtp->dev, "encap_recv sk=%p type %d\n",
+ sk, udp_sk(sk)->encap_type);
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
@@ -383,12 +460,13 @@ static void gtp_dev_uninit(struct net_device *dev)
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
const struct sock *sk,
- __be32 daddr)
+ __be32 daddr,
+ __be32 saddr)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
- fl4->saddr = inet_sk(sk)->inet_saddr;
+ fl4->saddr = saddr;
fl4->flowi4_tos = RT_CONN_FLAGS(sk);
fl4->flowi4_proto = sk->sk_protocol;
@@ -412,7 +490,7 @@ static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
}
-static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+static inline void gtp1_push_header(struct sk_buff *skb, __be32 tid)
{
int payload_len = skb->len;
struct gtp1_header *gtp1;
@@ -428,46 +506,63 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
gtp1->flags = 0x30; /* v1, GTP-non-prime. */
gtp1->type = GTP_TPDU;
gtp1->length = htons(payload_len);
- gtp1->tid = htonl(pctx->u.v1.o_tei);
+ gtp1->tid = tid;
/* TODO: Suppport for extension header, sequence number and N-PDU.
* Update the length field if any of them is available.
*/
}
-struct gtp_pktinfo {
- struct sock *sk;
- struct iphdr *iph;
- struct flowi4 fl4;
- struct rtable *rt;
- struct pdp_ctx *pctx;
- struct net_device *dev;
- __be16 gtph_port;
-};
-
-static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
+static inline int gtp1_push_control_header(struct sk_buff *skb,
+ __be32 tid,
+ struct gtpu_metadata *opts,
+ struct net_device *dev)
{
- switch (pktinfo->pctx->gtp_version) {
- case GTP_V0:
- pktinfo->gtph_port = htons(GTP0_PORT);
- gtp0_push_header(skb, pktinfo->pctx);
- break;
- case GTP_V1:
- pktinfo->gtph_port = htons(GTP1U_PORT);
- gtp1_push_header(skb, pktinfo->pctx);
- break;
+ struct gtp1_header *gtp1c;
+ int payload_len;
+
+ if (opts->ver != GTP_METADATA_V1)
+ return -ENOENT;
+
+ if (opts->type == 0xFE) {
+ /* For the end marker, ignore skb data. */
+ netdev_dbg(dev, "xmit pkt with null data");
+ pskb_trim(skb, 0);
}
+ if (skb_cow_head(skb, sizeof(*gtp1c)) < 0)
+ return -ENOMEM;
+
+ payload_len = skb->len;
+
+ gtp1c = skb_push(skb, sizeof(*gtp1c));
+
+ gtp1c->flags = opts->flags;
+ gtp1c->type = opts->type;
+ gtp1c->length = htons(payload_len);
+ gtp1c->tid = tid;
+ netdev_dbg(dev, "xmit control pkt: ver %d flags %x type %x pkt len %d tid %x",
+ opts->ver, opts->flags, opts->type, skb->len, tid);
+ return 0;
}
+struct gtp_pktinfo {
+ struct sock *sk;
+ __u8 tos;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct net_device *dev;
+ __be16 gtph_port;
+};
+
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
- struct sock *sk, struct iphdr *iph,
- struct pdp_ctx *pctx, struct rtable *rt,
+ struct sock *sk,
+ __u8 tos,
+ struct rtable *rt,
struct flowi4 *fl4,
struct net_device *dev)
{
pktinfo->sk = sk;
- pktinfo->iph = iph;
- pktinfo->pctx = pctx;
+ pktinfo->tos = tos;
pktinfo->rt = rt;
pktinfo->fl4 = *fl4;
pktinfo->dev = dev;
@@ -477,40 +572,99 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
struct gtp_pktinfo *pktinfo)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct gtpu_metadata *opts = NULL;
+ struct sock *sk = NULL;
struct pdp_ctx *pctx;
struct rtable *rt;
struct flowi4 fl4;
- struct iphdr *iph;
- __be16 df;
+ u8 gtp_version;
+ __be16 df = 0;
+ __be32 tun_id;
+ __be32 daddr;
+ __be32 saddr;
+ __u8 tos;
int mtu;
- /* Read the IP destination address and resolve the PDP context.
- * Prepend PDP header with TEI/TID from PDP ctx.
- */
- iph = ip_hdr(skb);
- if (gtp->role == GTP_ROLE_SGSN)
- pctx = ipv4_pdp_find(gtp, iph->saddr);
- else
- pctx = ipv4_pdp_find(gtp, iph->daddr);
+ if (gtp->collect_md) {
+ /* LWT GTP1U encap */
+ struct ip_tunnel_info *info = NULL;
- if (!pctx) {
- netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
- &iph->daddr);
- return -ENOENT;
+ info = skb_tunnel_info(skb);
+ if (!info) {
+ netdev_dbg(dev, "missing tunnel info");
+ return -ENOENT;
+ }
+ if (info->key.tp_dst && ntohs(info->key.tp_dst) != GTP1U_PORT) {
+ netdev_dbg(dev, "unexpected GTP dst port: %d", ntohs(info->key.tp_dst));
+ return -EOPNOTSUPP;
+ }
+ pctx = NULL;
+ gtp_version = GTP_V1;
+ tun_id = tunnel_id_to_key32(info->key.tun_id);
+ daddr = info->key.u.ipv4.dst;
+ saddr = info->key.u.ipv4.src;
+ sk = gtp->sk1u;
+ if (!sk) {
+ netdev_dbg(dev, "missing tunnel sock");
+ return -EOPNOTSUPP;
+ }
+ tos = info->key.tos;
+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+
+ if (info->options_len != 0) {
+ if (info->key.tun_flags & TUNNEL_GTPU_OPT) {
+ opts = ip_tunnel_info_opts(info);
+ } else {
+ netdev_dbg(dev, "missing tunnel metadata for control pkt");
+ return -EOPNOTSUPP;
+ }
+ }
+ netdev_dbg(dev, "flow-based GTP1U encap: tunnel id %d\n",
+ be32_to_cpu(tun_id));
+ } else {
+ struct iphdr *iph;
+
+ if (ntohs(skb->protocol) != ETH_P_IP)
+ return -EOPNOTSUPP;
+
+ iph = ip_hdr(skb);
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ if (gtp->role == GTP_ROLE_SGSN)
+ pctx = ipv4_pdp_find(gtp, iph->saddr);
+ else
+ pctx = ipv4_pdp_find(gtp, iph->daddr);
+
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+ &iph->daddr);
+ return -ENOENT;
+ }
+ sk = pctx->sk;
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ gtp_version = pctx->gtp_version;
+ tun_id = htonl(pctx->u.v1.o_tei);
+ daddr = pctx->peer_addr_ip4.s_addr;
+ saddr = inet_sk(sk)->inet_saddr;
+ tos = iph->tos;
+ df = iph->frag_off;
+ netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+ &iph->saddr, &iph->daddr);
}
- netdev_dbg(dev, "found PDP context %p\n", pctx);
- rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
+ rt = ip4_route_output_gtp(&fl4, sk, daddr, saddr);
if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ netdev_dbg(dev, "no route to SSGN %pI4\n", &daddr);
dev->stats.tx_carrier_errors++;
goto err;
}
if (rt->dst.dev == dev) {
- netdev_dbg(dev, "circular route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ netdev_dbg(dev, "circular route to SSGN %pI4\n", &daddr);
dev->stats.collisions++;
goto err_rt;
}
@@ -518,11 +672,10 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_drop(skb);
/* This is similar to tnl_update_pmtu(). */
- df = iph->frag_off;
if (df) {
mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
sizeof(struct iphdr) - sizeof(struct udphdr);
- switch (pctx->gtp_version) {
+ switch (gtp_version) {
case GTP_V0:
mtu -= sizeof(struct gtp0_header);
break;
@@ -536,17 +689,38 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
- if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
- mtu < ntohs(iph->tot_len)) {
- netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ if (!skb_is_gso(skb) && (df & htons(IP_DF)) && mtu < skb->len) {
+ netdev_dbg(dev, "packet too big, fragmentation needed");
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
goto err_rt;
}
- gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
- gtp_push_header(skb, pktinfo);
+ gtp_set_pktinfo_ipv4(pktinfo, sk, tos, rt, &fl4, dev);
+
+ if (unlikely(opts)) {
+ int err;
+
+ pktinfo->gtph_port = htons(GTP1U_PORT);
+ err = gtp1_push_control_header(skb, tun_id, opts, dev);
+ if (err) {
+ netdev_info(dev, "cntr pkt error %d", err);
+ goto err_rt;
+ }
+ return 0;
+ }
+
+ switch (gtp_version) {
+ case GTP_V0:
+ pktinfo->gtph_port = htons(GTP0_PORT);
+ gtp0_push_header(skb, pctx);
+ break;
+ case GTP_V1:
+ pktinfo->gtph_port = htons(GTP1U_PORT);
+ gtp1_push_header(skb, tun_id);
+ break;
+ }
return 0;
err_rt:
@@ -557,7 +731,6 @@ err:
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int proto = ntohs(skb->protocol);
struct gtp_pktinfo pktinfo;
int err;
@@ -569,32 +742,22 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
rcu_read_lock();
- switch (proto) {
- case ETH_P_IP:
- err = gtp_build_skb_ip4(skb, dev, &pktinfo);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
+ err = gtp_build_skb_ip4(skb, dev, &pktinfo);
rcu_read_unlock();
if (err < 0)
goto tx_err;
- switch (proto) {
- case ETH_P_IP:
- netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
- &pktinfo.iph->saddr, &pktinfo.iph->daddr);
- udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
- pktinfo.fl4.saddr, pktinfo.fl4.daddr,
- pktinfo.iph->tos,
- ip4_dst_hoplimit(&pktinfo.rt->dst),
- 0,
- pktinfo.gtph_port, pktinfo.gtph_port,
- true, false);
- break;
- }
+ udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
+ pktinfo.fl4.saddr,
+ pktinfo.fl4.daddr,
+ pktinfo.tos,
+ ip4_dst_hoplimit(&pktinfo.rt->dst),
+ 0,
+ pktinfo.gtph_port,
+ pktinfo.gtph_port,
+ true,
+ false);
return NETDEV_TX_OK;
tx_err:
@@ -610,6 +773,19 @@ static const struct net_device_ops gtp_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
};
+static struct gtp_dev *gtp_find_flow_based_dev(struct net *net)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
+
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list) {
+ if (gtp->collect_md)
+ return gtp;
+ }
+
+ return NULL;
+}
+
static void gtp_link_setup(struct net_device *dev)
{
dev->netdev_ops = &gtp_netdev_ops;
@@ -634,7 +810,7 @@ static void gtp_link_setup(struct net_device *dev)
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
+static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[]);
static void gtp_destructor(struct net_device *dev)
{
@@ -652,11 +828,24 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct gtp_net *gn;
int hashsize, err;
- if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
+ if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1] &&
+ !data[IFLA_GTP_COLLECT_METADATA])
return -EINVAL;
gtp = netdev_priv(dev);
+ if (data[IFLA_GTP_COLLECT_METADATA]) {
+ if (data[IFLA_GTP_FD0]) {
+ netdev_dbg(dev, "LWT device does not support setting v0 socket");
+ return -EINVAL;
+ }
+ if (gtp_find_flow_based_dev(src_net)) {
+ netdev_dbg(dev, "LWT device already exist");
+ return -EBUSY;
+ }
+ gtp->collect_md = true;
+ }
+
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
} else {
@@ -669,7 +858,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
- err = gtp_encap_enable(gtp, data);
+ err = gtp_encap_enable(gtp, dev, data);
if (err < 0)
goto out_hashtable;
@@ -683,7 +872,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
- netdev_dbg(dev, "registered new GTP interface\n");
+ netdev_dbg(dev, "registered new GTP interface %s\n", dev->name);
return 0;
@@ -714,6 +903,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_FD1] = { .type = NLA_U32 },
[IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
+ [IFLA_GTP_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -737,6 +927,9 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
goto nla_put_failure;
+ if (gtp->collect_md && nla_put_flag(skb, IFLA_GTP_COLLECT_METADATA))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -782,35 +975,24 @@ err1:
return -ENOMEM;
}
-static struct sock *gtp_encap_enable_socket(int fd, int type,
- struct gtp_dev *gtp)
+static int __gtp_encap_enable_socket(struct socket *sock, int type,
+ struct gtp_dev *gtp)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
- struct socket *sock;
struct sock *sk;
- int err;
-
- pr_debug("enable gtp on %d, %d\n", fd, type);
-
- sock = sockfd_lookup(fd, &err);
- if (!sock) {
- pr_debug("gtp socket fd=%d not found\n", fd);
- return NULL;
- }
sk = sock->sk;
if (sk->sk_protocol != IPPROTO_UDP ||
sk->sk_type != SOCK_DGRAM ||
(sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
- pr_debug("socket fd=%d not UDP\n", fd);
- sk = ERR_PTR(-EINVAL);
- goto out_sock;
+ pr_debug("socket not UDP\n");
+ return -EINVAL;
}
lock_sock(sk);
if (sk->sk_user_data) {
- sk = ERR_PTR(-EBUSY);
- goto out_rel_sock;
+ release_sock(sock->sk);
+ return -EBUSY;
}
sock_hold(sk);
@@ -821,15 +1003,58 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
tuncfg.encap_destroy = gtp_encap_destroy;
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
-
-out_rel_sock:
release_sock(sock->sk);
-out_sock:
+ return 0;
+}
+
+static struct sock *gtp_encap_enable_socket(int fd, int type,
+ struct gtp_dev *gtp)
+{
+ struct socket *sock;
+ int err;
+
+ pr_debug("enable gtp on %d, %d\n", fd, type);
+
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ pr_debug("gtp socket fd=%d not found\n", fd);
+ return NULL;
+ }
+ err = __gtp_encap_enable_socket(sock, type, gtp);
sockfd_put(sock);
- return sk;
+ if (err)
+ return ERR_PTR(err);
+
+ return sock->sk;
}
-static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
+static struct socket *gtp_create_gtp_socket(struct gtp_dev *gtp, struct net_device *dev)
+{
+ struct udp_port_cfg udp_conf;
+ struct socket *sock;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.local_udp_port = htons(GTP1U_PORT);
+
+ err = udp_sock_create(dev_net(dev), &udp_conf, &sock);
+ if (err < 0) {
+ pr_debug("create gtp sock failed: %d\n", err);
+ return ERR_PTR(err);
+ }
+ err = __gtp_encap_enable_socket(sock, UDP_ENCAP_GTP1U, gtp);
+ if (err) {
+ pr_debug("enable gtp sock encap failed: %d\n", err);
+ udp_tunnel_sock_release(sock);
+ return ERR_PTR(err);
+ }
+ pr_debug("create gtp sock done\n");
+ return sock;
+}
+
+static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[])
{
struct sock *sk1u = NULL;
struct sock *sk0 = NULL;
@@ -853,11 +1078,25 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
}
}
+ if (data[IFLA_GTP_COLLECT_METADATA]) {
+ struct socket *sock;
+
+ if (!sk1u) {
+ sock = gtp_create_gtp_socket(gtp, dev);
+ if (IS_ERR(sock))
+ return PTR_ERR(sock);
+
+ gtp->collect_md_sock = sock;
+ sk1u = sock->sk;
+ } else {
+ gtp->collect_md_sock = NULL;
+ }
+ }
+
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
if (role > GTP_ROLE_SGSN) {
- gtp_encap_disable_sock(sk0);
- gtp_encap_disable_sock(sk1u);
+ gtp_encap_disable(gtp);
return -EINVAL;
}
}
@@ -1416,7 +1655,7 @@ static int __init gtp_init(void)
if (err < 0)
goto unreg_genl_family;
- pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
+ pr_info("GTP module loaded (pdp ctx size %zd bytes) with tnl-md support\n",
sizeof(struct pdp_ctx));
return 0;
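The IFLA_GTP_COLLECT_METADATA attribute added above is a zero-length NLA_FLAG: its mere presence in the netlink message is the boolean. A minimal sketch of how such a flag is produced and consumed with the standard netlink helpers (the parse-side handler is illustrative, not the driver's code):

    /* Fill side: emit the attribute only when the feature is enabled.
     * nla_put_flag() adds a payload-less attribute and returns non-zero
     * only if the skb ran out of room. */
    if (gtp->collect_md && nla_put_flag(skb, IFLA_GTP_COLLECT_METADATA))
            goto nla_put_failure;

    /* Parse side: presence alone carries the value; no payload is read. */
    static bool example_collect_md(struct nlattr *data[])
    {
            return data[IFLA_GTP_COLLECT_METADATA] != NULL;
    }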
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2a87cfa27ac0..e1a497d3c9ba 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -105,9 +105,43 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
u32 processor_masks_entry_size;
};
-/* Fwd declaration */
-struct ndis_tcp_ip_checksum_info;
-struct ndis_pkt_8021q_info;
+struct ndis_tcp_ip_checksum_info {
+ union {
+ struct {
+ u32 is_ipv4:1;
+ u32 is_ipv6:1;
+ u32 tcp_checksum:1;
+ u32 udp_checksum:1;
+ u32 ip_header_checksum:1;
+ u32 reserved:11;
+ u32 tcp_header_offset:10;
+ } transmit;
+ struct {
+ u32 tcp_checksum_failed:1;
+ u32 udp_checksum_failed:1;
+ u32 ip_checksum_failed:1;
+ u32 tcp_checksum_succeeded:1;
+ u32 udp_checksum_succeeded:1;
+ u32 ip_checksum_succeeded:1;
+ u32 loopback:1;
+ u32 tcp_checksum_value_invalid:1;
+ u32 ip_checksum_value_invalid:1;
+ } receive;
+ u32 value;
+ };
+};
+
+struct ndis_pkt_8021q_info {
+ union {
+ struct {
+ u32 pri:3; /* User Priority */
+ u32 cfi:1; /* Canonical Format ID */
+ u32 vlanid:12; /* VLAN ID */
+ u32 reserved:16;
+ };
+ u32 value;
+ };
+};
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@@ -194,7 +228,8 @@ int netvsc_send(struct net_device *net,
struct sk_buff *skb,
bool xdp_tx);
void netvsc_linkstatus_callback(struct net_device *net,
- struct rndis_message *resp);
+ struct rndis_message *resp,
+ void *data);
int netvsc_recv_callback(struct net_device *net,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan);
@@ -884,9 +919,10 @@ struct multi_recv_comp {
#define NVSP_RSC_MAX 562 /* Max #RSC frags in a vmbus xfer page pkt */
struct nvsc_rsc {
- const struct ndis_pkt_8021q_info *vlan;
- const struct ndis_tcp_ip_checksum_info *csum_info;
- const u32 *hash_info;
+ struct ndis_pkt_8021q_info vlan;
+ struct ndis_tcp_ip_checksum_info csum_info;
+ u32 hash_info;
+ u8 ppi_flags; /* valid/present bits for the above PPIs */
u8 is_last; /* last RNDIS msg in a vmtransfer_page */
u32 cnt; /* #fragments in an RSC packet */
u32 pktlen; /* Full packet length */
@@ -894,6 +930,10 @@ struct nvsc_rsc {
u32 len[NVSP_RSC_MAX];
};
+#define NVSC_RSC_VLAN BIT(0) /* valid/present bit for 'vlan' */
+#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
+#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
+
struct netvsc_stats {
u64 packets;
u64 bytes;
@@ -1002,6 +1042,7 @@ struct net_device_context {
struct netvsc_channel {
struct vmbus_channel *channel;
struct netvsc_device *net_device;
+ void *recv_buf; /* buffer to copy packets out from the receive buffer */
const struct vmpacket_descriptor *desc;
struct napi_struct napi;
struct multi_send_data msd;
@@ -1234,18 +1275,6 @@ struct rndis_pktinfo_id {
u16 pkt_id;
};
-struct ndis_pkt_8021q_info {
- union {
- struct {
- u32 pri:3; /* User Priority */
- u32 cfi:1; /* Canonical Format ID */
- u32 vlanid:12; /* VLAN ID */
- u32 reserved:16;
- };
- u32 value;
- };
-};
-
struct ndis_object_header {
u8 type;
u8 revision;
@@ -1436,32 +1465,6 @@ struct ndis_offload_params {
};
};
-struct ndis_tcp_ip_checksum_info {
- union {
- struct {
- u32 is_ipv4:1;
- u32 is_ipv6:1;
- u32 tcp_checksum:1;
- u32 udp_checksum:1;
- u32 ip_header_checksum:1;
- u32 reserved:11;
- u32 tcp_header_offset:10;
- } transmit;
- struct {
- u32 tcp_checksum_failed:1;
- u32 udp_checksum_failed:1;
- u32 ip_checksum_failed:1;
- u32 tcp_checksum_succeeded:1;
- u32 udp_checksum_succeeded:1;
- u32 ip_checksum_succeeded:1;
- u32 loopback:1;
- u32 tcp_checksum_value_invalid:1;
- u32 ip_checksum_value_invalid:1;
- } receive;
- u32 value;
- };
-};
-
struct ndis_tcp_lso_info {
union {
struct {
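The hyperv_net.h changes above replace per-packet-info pointers (which used to point into the host-shared receive buffer) with embedded copies plus NVSC_RSC_* presence bits, so a consumer tests the bit rather than a pointer. A minimal sketch, assuming a struct netvsc_channel *nvchan whose rsc has been filled in (the helper is hypothetical):

    /* The presence bit says whether the copied PPI existed in the packet. */
    if (nvchan->rsc.ppi_flags & NVSC_RSC_CSUM_INFO) {
            const struct ndis_tcp_ip_checksum_info *ci = &nvchan->rsc.csum_info;

            if (ci->receive.tcp_checksum_succeeded)
                    mark_csum_verified();   /* hypothetical helper */
    }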
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2350342b961f..0fba8257fc11 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -37,6 +37,10 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+ /* Block sending traffic to VF if it's about to be gone */
+ if (!vf)
+ net_device_ctx->data_path_is_vf = vf;
+
memset(init_pkt, 0, sizeof(struct nvsp_message));
init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
if (vf)
@@ -50,8 +54,11 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
- VMBUS_RQST_ID_NO_RESPONSE,
- VM_PKT_DATA_INBAND, 0);
+ (unsigned long)init_pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ wait_for_completion(&nv_dev->channel_init_wait);
+ net_device_ctx->data_path_is_vf = vf;
}
/* Worker to setup sub channels on initial setup
@@ -124,6 +131,7 @@ static void free_netvsc_device(struct rcu_head *head)
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
+ kfree(nvdev->chan_table[i].recv_buf);
vfree(nvdev->chan_table[i].mrc.slots);
}
@@ -754,8 +762,31 @@ static void netvsc_send_completion(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
- const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
+ const struct nvsp_message *nvsp_packet;
u32 msglen = hv_pkt_datalen(desc);
+ struct nvsp_message *pkt_rqst;
+ u64 cmd_rqst;
+
+ /* First check if this is a VMBUS completion without data payload */
+ if (!msglen) {
+ cmd_rqst = vmbus_request_addr(&incoming_channel->requestor,
+ (u64)desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Invalid transaction id\n");
+ return;
+ }
+
+ pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
+ switch (pkt_rqst->hdr.msg_type) {
+ case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
+ complete(&net_device->channel_init_wait);
+ break;
+
+ default:
+ netdev_err(ndev, "Unexpected VMBUS completion!!\n");
+ }
+ return;
+ }
/* Ensure packet is big enough to read header fields */
if (msglen < sizeof(struct nvsp_message_header)) {
@@ -763,6 +794,7 @@ static void netvsc_send_completion(struct net_device *ndev,
return;
}
+ nvsp_packet = hv_pkt_data(desc);
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
@@ -887,6 +919,7 @@ static inline int netvsc_send_pkt(
int ret;
u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
+ memset(&nvmsg, 0, sizeof(struct nvsp_message));
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb)
rpkt->channel_type = 0; /* 0 is RMC_DATA */
@@ -1252,6 +1285,19 @@ static int netvsc_receive(struct net_device *ndev,
continue;
}
+ /* We're going to copy (sections of) the packet into nvchan->recv_buf;
+ * make sure that nvchan->recv_buf is large enough to hold the packet.
+ */
+ if (unlikely(buflen > net_device->recv_section_size)) {
+ nvchan->rsc.cnt = 0;
+ status = NVSP_STAT_FAIL;
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Packet too big: buflen=%u recv_section_size=%u\n",
+ buflen, net_device->recv_section_size);
+
+ continue;
+ }
+
data = recv_buf + offset;
nvchan->rsc.is_last = (i == count - 1);
@@ -1306,7 +1352,7 @@ static void netvsc_send_table(struct net_device *ndev,
sizeof(union nvsp_6_message_uber);
/* Boundary check for all versions */
- if (offset > msglen - count * sizeof(u32)) {
+ if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
netdev_err(ndev, "Received send-table offset too big:%u\n",
offset);
return;
@@ -1503,6 +1549,12 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
struct netvsc_channel *nvchan = &net_device->chan_table[i];
+ nvchan->recv_buf = kzalloc(device_info->recv_section_size, GFP_KERNEL);
+ if (nvchan->recv_buf == NULL) {
+ ret = -ENOMEM;
+ goto cleanup2;
+ }
+
nvchan->channel = device->channel;
nvchan->net_device = net_device;
u64_stats_init(&nvchan->tx_stats.syncp);
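The netvsc_send_table() fix above adds the "msglen < count * sizeof(u32)" test because both operands are u32: when the subtrahend exceeds msglen, "msglen - count * sizeof(u32)" wraps to a huge value and the old "offset > ..." check always passes. The safe pattern in general form (the helper name is invented for illustration):

    /* Validate that [offset, offset + need) fits in a msglen-byte message
     * without risking unsigned wraparound in the subtraction. */
    static bool range_within(u32 msglen, u32 offset, u32 need)
    {
            return need <= msglen && offset <= msglen - need;
    }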
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 440486d9c999..aa877da113f8 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -37,6 +37,12 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
if (!prog)
goto out;
+ /* Ensure that the below memcpy() won't overflow the page buffer. */
+ if (len > ndev->mtu + ETH_HLEN) {
+ act = XDP_DROP;
+ goto out;
+ }
+
/* allocate page buffer for data */
page = alloc_page(GFP_ATOMIC);
if (!page) {
@@ -44,12 +50,8 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
goto out;
}
- xdp->data_hard_start = page_address(page);
- xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &nvchan->xdp_rxq;
- xdp->frame_sz = PAGE_SIZE;
+ xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
memcpy(xdp->data, data, len);
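xdp_init_buff() and xdp_prepare_buff() (introduced in the 5.12 cycle) package exactly the field assignments deleted above; the final "false" tells xdp_prepare_buff() that the metadata area is not valid, matching the old xdp_set_data_meta_invalid() call. Usage in isolation, with names taken from the hunk:

    struct xdp_buff xdp;

    xdp_init_buff(&xdp, PAGE_SIZE, &nvchan->xdp_rxq);   /* frame size, rxq  */
    xdp_prepare_buff(&xdp, page_address(page),          /* hard start       */
                     NETVSC_XDP_HDRM, len, false);      /* headroom, data len,
                                                         * metadata invalid */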
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f32f28311d57..8176fa0c8b16 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -539,7 +539,8 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
*/
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
- netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
+ net_device_ctx->data_path_is_vf)
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will at most need two pages to describe the rndis
@@ -742,7 +743,8 @@ static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
* netvsc_linkstatus_callback - Link up/down notification
*/
void netvsc_linkstatus_callback(struct net_device *net,
- struct rndis_message *resp)
+ struct rndis_message *resp,
+ void *data)
{
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -756,12 +758,24 @@ void netvsc_linkstatus_callback(struct net_device *net,
return;
}
+ /* Copy the RNDIS indicate status into nvchan->recv_buf */
+ memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));
+
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
- speed = *(u32 *)((void *)indicate
- + indicate->status_buf_offset) / 10000;
+ /* Validate status_buf_offset */
+ if (indicate->status_buflen < sizeof(speed) ||
+ indicate->status_buf_offset < sizeof(*indicate) ||
+ resp->msg_len - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
+ resp->msg_len - RNDIS_HEADER_SIZE - indicate->status_buf_offset
+ < indicate->status_buflen) {
+ netdev_err(net, "invalid rndis_indicate_status packet\n");
+ return;
+ }
+
+ speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
ndev_ctx->speed = speed;
return;
}
@@ -816,10 +830,11 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct xdp_buff *xdp)
{
struct napi_struct *napi = &nvchan->napi;
- const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
+ const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
- nvchan->rsc.csum_info;
- const u32 *hash_info = nvchan->rsc.hash_info;
+ &nvchan->rsc.csum_info;
+ const u32 *hash_info = &nvchan->rsc.hash_info;
+ u8 ppi_flags = nvchan->rsc.ppi_flags;
struct sk_buff *skb;
void *xbuf = xdp->data_hard_start;
int i;
@@ -863,22 +878,28 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* We compute it here if the flags are set, because on Linux, the IP
* checksum is always checked.
*/
- if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
csum_info->receive.ip_checksum_succeeded &&
- skb->protocol == htons(ETH_P_IP))
+ skb->protocol == htons(ETH_P_IP)) {
+ /* Check that there is enough space to hold the IP header. */
+ if (skb_headlen(skb) < sizeof(struct iphdr)) {
+ kfree_skb(skb);
+ return NULL;
+ }
netvsc_comp_ipcsum(skb);
+ }
/* Do L4 checksum offload if enabled and present. */
- if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- if (hash_info && (net->features & NETIF_F_RXHASH))
+ if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
- if (vlan) {
+ if (ppi_flags & NVSC_RSC_VLAN) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
@@ -2381,12 +2402,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
* interface, there is only the CHANGE event and no UP or DOWN event.
*/
-static int netvsc_vf_changed(struct net_device *vf_netdev)
+static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
- bool vf_is_up = netif_running(vf_netdev);
+ bool vf_is_up = false;
+
+ if (event != NETDEV_GOING_DOWN)
+ vf_is_up = netif_running(vf_netdev);
ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
@@ -2399,7 +2423,6 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
- net_device_ctx->data_path_is_vf = vf_is_up;
netvsc_switch_datapath(ndev, vf_is_up);
netdev_info(ndev, "Data path switched %s VF: %s\n",
@@ -2716,7 +2739,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
- return netvsc_vf_changed(event_dev);
+ case NETDEV_GOING_DOWN:
+ return netvsc_vf_changed(event_dev, event);
default:
return NOTIFY_DONE;
}
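Adding NETDEV_GOING_DOWN to the notifier cases above lets netvsc switch the data path back to the synthetic NIC before the VF actually disappears; at that point netif_running() would still report true. The event handling reduces to a sketch like:

    /* Treat an imminent shutdown as "VF down" regardless of netif_running(). */
    bool vf_is_up = (event != NETDEV_GOING_DOWN) && netif_running(vf_netdev);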
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 598713c0d5a8..6c48a4d62736 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -127,70 +127,89 @@ static void put_rndis_request(struct rndis_device *dev,
}
static void dump_rndis_message(struct net_device *netdev,
- const struct rndis_message *rndis_msg)
+ const struct rndis_message *rndis_msg,
+ const void *data)
{
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
- "data offset %u data len %u, # oob %u, "
- "oob offset %u, oob len %u, pkt offset %u, "
- "pkt len %u\n",
- rndis_msg->msg_len,
- rndis_msg->msg.pkt.data_offset,
- rndis_msg->msg.pkt.data_len,
- rndis_msg->msg.pkt.num_oob_data_elements,
- rndis_msg->msg.pkt.oob_data_offset,
- rndis_msg->msg.pkt.oob_data_len,
- rndis_msg->msg.pkt.per_pkt_info_offset,
- rndis_msg->msg.pkt.per_pkt_info_len);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
+ const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
+ "data offset %u data len %u, # oob %u, "
+ "oob offset %u, oob len %u, pkt offset %u, "
+ "pkt len %u\n",
+ rndis_msg->msg_len,
+ pkt->data_offset,
+ pkt->data_len,
+ pkt->num_oob_data_elements,
+ pkt->oob_data_offset,
+ pkt->oob_data_len,
+ pkt->per_pkt_info_offset,
+ pkt->per_pkt_info_len);
+ }
break;
case RNDIS_MSG_INIT_C:
- netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
- "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
- "device flags %d, max xfer size 0x%x, max pkts %u, "
- "pkt aligned %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.init_complete.req_id,
- rndis_msg->msg.init_complete.status,
- rndis_msg->msg.init_complete.major_ver,
- rndis_msg->msg.init_complete.minor_ver,
- rndis_msg->msg.init_complete.dev_flags,
- rndis_msg->msg.init_complete.max_xfer_size,
- rndis_msg->msg.init_complete.
- max_pkt_per_msg,
- rndis_msg->msg.init_complete.
- pkt_alignment_factor);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_initialize_complete)) {
+ const struct rndis_initialize_complete *init_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
+ "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
+ "device flags %d, max xfer size 0x%x, max pkts %u, "
+ "pkt aligned %u)\n",
+ rndis_msg->msg_len,
+ init_complete->req_id,
+ init_complete->status,
+ init_complete->major_ver,
+ init_complete->minor_ver,
+ init_complete->dev_flags,
+ init_complete->max_xfer_size,
+ init_complete->max_pkt_per_msg,
+ init_complete->pkt_alignment_factor);
+ }
break;
case RNDIS_MSG_QUERY_C:
- netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
- "(len %u, id 0x%x, status 0x%x, buf len %u, "
- "buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.query_complete.req_id,
- rndis_msg->msg.query_complete.status,
- rndis_msg->msg.query_complete.
- info_buflen,
- rndis_msg->msg.query_complete.
- info_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_query_complete)) {
+ const struct rndis_query_complete *query_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
+ "(len %u, id 0x%x, status 0x%x, buf len %u, "
+ "buf offset %u)\n",
+ rndis_msg->msg_len,
+ query_complete->req_id,
+ query_complete->status,
+ query_complete->info_buflen,
+ query_complete->info_buf_offset);
+ }
break;
case RNDIS_MSG_SET_C:
- netdev_dbg(netdev,
- "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.set_complete.req_id,
- rndis_msg->msg.set_complete.status);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
+ const struct rndis_set_complete *set_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->msg_len,
+ set_complete->req_id,
+ set_complete->status);
+ }
break;
case RNDIS_MSG_INDICATE:
- netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
- "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.indicate_status.status,
- rndis_msg->msg.indicate_status.status_buflen,
- rndis_msg->msg.indicate_status.status_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_indicate_status)) {
+ const struct rndis_indicate_status *indicate_status =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
+ "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
+ rndis_msg->msg_len,
+ indicate_status->status,
+ indicate_status->status_buflen,
+ indicate_status->status_buf_offset);
+ }
break;
default:
@@ -246,11 +265,20 @@ static void rndis_set_link_state(struct rndis_device *rdev,
{
u32 link_status;
struct rndis_query_complete *query_complete;
+ u32 msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
+ return;
query_complete = &request->response_msg.msg.query_complete;
if (query_complete->status == RNDIS_STATUS_SUCCESS &&
- query_complete->info_buflen == sizeof(u32)) {
+ query_complete->info_buflen >= sizeof(u32) &&
+ query_complete->info_buf_offset >= sizeof(*query_complete) &&
+ msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ >= query_complete->info_buflen) {
memcpy(&link_status, (void *)((unsigned long)query_complete +
query_complete->info_buf_offset), sizeof(u32));
rdev->link_state = link_status != 0;
@@ -259,8 +287,10 @@ static void rndis_set_link_state(struct rndis_device *rdev,
static void rndis_filter_receive_response(struct net_device *ndev,
struct netvsc_device *nvdev,
- const struct rndis_message *resp)
+ struct rndis_message *resp,
+ void *data)
{
+ u32 *req_id = &resp->msg.init_complete.req_id;
struct rndis_device *dev = nvdev->extension;
struct rndis_request *request = NULL;
bool found = false;
@@ -285,14 +315,16 @@ static void rndis_filter_receive_response(struct net_device *ndev,
return;
}
+ /* Copy the request ID into nvchan->recv_buf */
+ *req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
+
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
/*
* All request/response message contains RequestId as the 1st
* field
*/
- if (request->request_msg.msg.init_req.req_id
- == resp->msg.init_complete.req_id) {
+ if (request->request_msg.msg.init_req.req_id == *req_id) {
found = true;
break;
}
@@ -302,8 +334,10 @@ static void rndis_filter_receive_response(struct net_device *ndev,
if (found) {
if (resp->msg_len <=
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
- memcpy(&request->response_msg, resp,
- resp->msg_len);
+ memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
+ memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ data + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
if (request->request_msg.ndis_msg_type ==
RNDIS_MSG_QUERY && request->request_msg.msg.
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
@@ -332,7 +366,7 @@ static void rndis_filter_receive_response(struct net_device *ndev,
netdev_err(ndev,
"no rndis request found for this response "
"(id 0x%x res type 0x%x)\n",
- resp->msg.init_complete.req_id,
+ *req_id,
resp->ndis_msg_type);
}
}
@@ -343,7 +377,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
*/
static inline void *rndis_get_ppi(struct net_device *ndev,
struct rndis_packet *rpkt,
- u32 rpkt_len, u32 type, u8 internal)
+ u32 rpkt_len, u32 type, u8 internal,
+ u32 ppi_size, void *data)
{
struct rndis_per_packet_info *ppi;
int len;
@@ -359,7 +394,8 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
return NULL;
}
- if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
+ if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
+ rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
rpkt->per_pkt_info_len);
return NULL;
@@ -367,6 +403,8 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
rpkt->per_pkt_info_offset);
+ /* Copy the PPIs into nvchan->recv_buf */
+ memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
len = rpkt->per_pkt_info_len;
while (len > 0) {
@@ -381,8 +419,15 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
continue;
}
- if (ppi->type == type && ppi->internal == internal)
+ if (ppi->type == type && ppi->internal == internal) {
+ /* ppi->size should be big enough to hold the returned object. */
+ if (ppi->size - ppi->ppi_offset < ppi_size) {
+ netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
+ ppi->size, ppi->ppi_offset);
+ continue;
+ }
return (void *)((ulong)ppi + ppi->ppi_offset);
+ }
len -= ppi->size;
ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
}
@@ -402,10 +447,29 @@ void rsc_add_data(struct netvsc_channel *nvchan,
if (cnt) {
nvchan->rsc.pktlen += len;
} else {
- nvchan->rsc.vlan = vlan;
- nvchan->rsc.csum_info = csum_info;
+ /* The data/values pointed by vlan, csum_info and hash_info are shared
+ * across the different 'fragments' of the RSC packet; store them into
+ * the packet itself.
+ */
+ if (vlan != NULL) {
+ memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
+ }
+ if (csum_info != NULL) {
+ memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
+ }
nvchan->rsc.pktlen = len;
- nvchan->rsc.hash_info = hash_info;
+ if (hash_info != NULL) {
+ nvchan->rsc.hash_info = *hash_info;
+ nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
+ }
}
nvchan->rsc.data[cnt] = data;
@@ -417,7 +481,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan,
struct rndis_message *msg,
- u32 data_buflen)
+ void *data, u32 data_buflen)
{
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
const struct ndis_tcp_ip_checksum_info *csum_info;
@@ -425,7 +489,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
const struct rndis_pktinfo_id *pktinfo_id;
const u32 *hash_info;
u32 data_offset, rpkt_len;
- void *data;
bool rsc_more = false;
int ret;
@@ -436,6 +499,9 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
+ /* Copy the RNDIS packet into nvchan->recv_buf */
+ memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
+
/* Validate rndis_pkt offset */
if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
netdev_err(ndev, "invalid rndis packet offset: %u\n",
@@ -461,15 +527,17 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
- vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
-
- csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
+ vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
+ data);
- hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
+ csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
+ sizeof(*csum_info), data);
- pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
+ hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
+ sizeof(*hash_info), data);
- data = (void *)msg + data_offset;
+ pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
+ sizeof(*pktinfo_id), data);
/* Identify RSC frags, drop erroneous packets */
if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
@@ -498,7 +566,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* the data packet to the stack, without the rndis trailer padding
*/
rsc_add_data(nvchan, vlan, csum_info, hash_info,
- data, rndis_pkt->data_len);
+ data + data_offset, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
@@ -520,33 +588,41 @@ int rndis_filter_receive(struct net_device *ndev,
void *data, u32 buflen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct rndis_message *rndis_msg = data;
+ struct rndis_message *rndis_msg = nvchan->recv_buf;
- if (netif_msg_rx_status(net_device_ctx))
- dump_rndis_message(ndev, rndis_msg);
+ if (buflen < RNDIS_HEADER_SIZE) {
+ netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Copy the RNDIS msg header into nvchan->recv_buf */
+ memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
/* Validate incoming rndis_message packet */
- if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
+ if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
buflen < rndis_msg->msg_len) {
netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
buflen, rndis_msg->msg_len);
return NVSP_STAT_FAIL;
}
+ if (netif_msg_rx_status(net_device_ctx))
+ dump_rndis_message(ndev, rndis_msg, data);
+
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
return rndis_filter_receive_data(ndev, net_dev, nvchan,
- rndis_msg, buflen);
+ rndis_msg, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
/* completion msgs */
- rndis_filter_receive_response(ndev, net_dev, rndis_msg);
+ rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
break;
case RNDIS_MSG_INDICATE:
/* notification msgs */
- netvsc_linkstatus_callback(ndev, rndis_msg);
+ netvsc_linkstatus_callback(ndev, rndis_msg, data);
break;
default:
netdev_err(ndev,
@@ -567,6 +643,7 @@ static int rndis_filter_query_device(struct rndis_device *dev,
u32 inresult_size = *result_size;
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
+ u32 msg_len;
int ret = 0;
if (!result)
@@ -634,8 +711,19 @@ static int rndis_filter_query_device(struct rndis_device *dev,
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
+ msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
+ ret = -1;
+ goto cleanup;
+ }
- if (query_complete->info_buflen > inresult_size) {
+ if (query_complete->info_buflen > inresult_size ||
+ query_complete->info_buf_offset < sizeof(*query_complete) ||
+ msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ < query_complete->info_buflen) {
ret = -1;
goto cleanup;
}
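A pattern worth noting across the rndis_filter.c changes: headers are memcpy()'d out of the vmbus receive buffer into the private recv_buf before any field is trusted, because the source pages stay writable by the host and could change between a validity check and the later use. In generic, hedged form (the type and helpers are hypothetical):

    struct msg_hdr snap;

    memcpy(&snap, shared_buf, sizeof(snap));        /* snapshot first        */
    if (snap.len < sizeof(snap) || snap.len > buflen)
            return -EINVAL;                         /* validate the snapshot */
    consume(&snap);                                 /* use only the snapshot */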
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 9f0d2a93379c..b68f1289b89e 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -1,9 +1,10 @@
config QCOM_IPA
tristate "Qualcomm IPA support"
- depends on ARCH_QCOM && 64BIT && NET
- depends on QCOM_Q6V5_MSS
+ depends on 64BIT && NET && QCOM_SMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_QMI_HELPERS
- select QCOM_MDT_LOADER
help
Choose Y or M here to include support for the Qualcomm
IP Accelerator (IPA), a hardware block present in some
@@ -11,7 +12,8 @@ config QCOM_IPA
that is capable of generic hardware handling of IP packets,
including routing, filtering, and NAT. Currently the IPA
driver supports only basic transport of network traffic
- between the AP and modem, on the Qualcomm SDM845 SoC.
+ between the AP and modem, on the Qualcomm SDM845 and SC7180
+ SoCs.
Note that if selected, the selection type must match that
of QCOM_Q6V5_COMMON (Y or M).
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index c4795249719d..f79cf3c327c1 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -89,9 +89,9 @@
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
-#define GSI_CMD_TIMEOUT 5 /* seconds */
+#define GSI_CMD_TIMEOUT 50 /* milliseconds */
-#define GSI_CHANNEL_STOP_RX_RETRIES 10
+#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
@@ -220,7 +220,59 @@ static void gsi_irq_teardown(struct gsi *gsi)
/* Nothing to do */
}
-static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
+/* Event ring commands are performed one at a time. Their completion
+ * is signaled by the event ring control GSI interrupt type, which is
+ * only enabled when we issue an event ring command. Only the event
+ * ring being operated on has this interrupt enabled.
+ */
+static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val = BIT(evt_ring_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+}
+
+/* Disable event ring control interrupts */
+static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+}
+
+/* Channel commands are performed one at a time. Their completion is
+ * signaled by the channel control GSI interrupt type, which is only
+ * enabled when we issue a channel command. Only the channel being
+ * operated on has this interrupt enabled.
+ */
+static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
+{
+ u32 val = BIT(channel_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+}
+
+/* Disable channel control interrupts */
+static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
u32 val;
@@ -234,11 +286,11 @@ static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
gsi_irq_type_enable(gsi, GSI_IEOB);
}
-static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
u32 val;
- gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+ gsi->ieob_enabled_bitmap &= ~event_mask;
/* Disable the interrupt type if this was the last enabled channel */
if (!gsi->ieob_enabled_bitmap)
@@ -248,6 +300,11 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
+static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
+}
+
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
@@ -307,11 +364,13 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
+ unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+
reinit_completion(completion);
iowrite32(val, gsi->virt + reg);
- return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+ return !!wait_for_completion_timeout(completion, timeout);
}
/* Return the hardware's notion of the current state of an event ring */
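With GSI_CMD_TIMEOUT now expressed in milliseconds, the conversion to jiffies has to be explicit; multiplying by HZ would only be correct for seconds. The underlying idiom, in isolation (the trigger is a hypothetical stand-in):

    DECLARE_COMPLETION_ONSTACK(done);
    unsigned long timeout = msecs_to_jiffies(50);   /* 50 ms, as above */

    start_command(&done);                           /* hypothetical trigger */
    if (!wait_for_completion_timeout(&done, timeout))
            return -ETIMEDOUT;                      /* 0 return means timed out */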
@@ -326,48 +385,36 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one event ring command at a time, and event
- * control interrupts should only occur when such a command
- * is issued here. Only permit *this* event ring to trigger
- * an interrupt, and only enable the event control IRQ type
- * when we expect it to occur.
- */
- val = BIT(evt_ring_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_EV_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ev_ctrl_disable(gsi);
- if (success)
- return 0;
+ if (!timeout)
+ return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
-
- return -ETIMEDOUT;
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
@@ -377,14 +424,16 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return -EINVAL;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
- ret = -EIO;
- }
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- return ret;
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, evt_ring->state);
+
+ return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
@@ -392,7 +441,6 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
enum gsi_evt_ring_state state = evt_ring->state;
- int ret;
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
@@ -401,17 +449,20 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
@@ -419,10 +470,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
@@ -438,41 +493,30 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
}
/* Issue a channel command and wait for it to complete */
-static int
+static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one channel command at a time, and channel
- * control interrupts should only occur when such a command is
- * issued here. So we only permit *this* channel to trigger
- * an interrupt and only enable the channel control IRQ type
- * when we expect it to occur.
- */
- val = BIT(channel_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ch_ctrl_enable(gsi, channel_id);
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_CH_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ch_ctrl_disable(gsi);
- if (success)
- return 0;
+ if (!timeout)
+ return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
-
- return -ETIMEDOUT;
}
/* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +525,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
@@ -491,17 +534,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ gsi_channel_command(channel, GSI_CH_ALLOCATE);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "channel %u bad state %u after alloc\n",
- channel_id, state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_ALLOCATED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
+
+ return -EIO;
}
/* Start an ALLOCATED channel */
@@ -509,7 +552,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +561,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_START);
+ gsi_channel_command(channel, GSI_CH_START);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "channel %u bad state %u after start\n",
- gsi_channel_id(channel), state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_STARTED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
}
/* Stop a GSI channel in STARTED state */
@@ -537,7 +579,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
@@ -554,12 +595,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_STOP);
+ gsi_channel_command(channel, GSI_CH_STOP);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (ret || state == GSI_CHANNEL_STATE_STOPPED)
- return ret;
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
/* We may have to try again if stop is in progress */
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,9 +617,9 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
- msleep(1); /* A short delay is required before a RESET command */
+ /* A short delay is required before a RESET command */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
@@ -590,11 +631,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_RESET);
+ gsi_channel_command(channel, GSI_CH_RESET);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u after reset\n",
gsi_channel_id(channel), state);
}
@@ -605,7 +646,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +654,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
dev_err(dev, "channel %u bad state %u after dealloc\n",
channel_id, state);
}
@@ -730,13 +771,13 @@ static void gsi_channel_freeze(struct gsi_channel *channel)
napi_disable(&channel->napi);
- gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
+ gsi_irq_ieob_disable_one(channel->gsi, channel->evt_ring_id);
}
/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
+ gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
napi_enable(&channel->napi);
}
@@ -853,21 +894,18 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- u32 retries;
+ u32 retries = GSI_CHANNEL_STOP_RETRIES;
int ret;
gsi_channel_freeze(channel);
- /* RX channels might require a little time to enter STOPPED state */
- retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
-
mutex_lock(&gsi->mutex);
do {
ret = gsi_channel_stop_command(channel);
if (ret != -EAGAIN)
break;
- msleep(1);
+ usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
} while (retries--);
mutex_unlock(&gsi->mutex);
@@ -1167,6 +1205,7 @@ static void gsi_isr_ieob(struct gsi *gsi)
u32 event_mask;
event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ gsi_irq_ieob_disable(gsi, event_mask);
iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
while (event_mask) {
@@ -1174,7 +1213,6 @@ static void gsi_isr_ieob(struct gsi *gsi)
event_mask ^= BIT(evt_ring_id);
- gsi_irq_ieob_disable(gsi, evt_ring_id);
napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
}
}
@@ -1419,7 +1457,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
}
/* Consult hardware, move any newly completed transactions to completed list */
-static void gsi_channel_update(struct gsi_channel *channel)
+static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1438,7 +1476,7 @@ static void gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return;
+ return NULL;
/* Get the transaction for the latest completed event. Take a
* reference to keep it from completing before we give the events
@@ -1463,6 +1501,8 @@ static void gsi_channel_update(struct gsi_channel *channel)
gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
gsi_trans_free(trans);
+
+ return gsi_channel_trans_complete(channel);
}
/**
@@ -1483,11 +1523,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
/* Get the first transaction from the completed list */
trans = gsi_channel_trans_complete(channel);
- if (!trans) {
- /* List is empty; see if there's more to do */
- gsi_channel_update(channel);
- trans = gsi_channel_trans_complete(channel);
- }
+ if (!trans) /* List is empty; see if there's more to do */
+ trans = gsi_channel_update(channel);
if (trans)
gsi_trans_move_polled(trans);
@@ -1510,23 +1547,20 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
struct gsi_channel *channel;
- int count = 0;
+ int count;
channel = container_of(napi, struct gsi_channel, napi);
- while (count < budget) {
+ for (count = 0; count < budget; count++) {
struct gsi_trans *trans;
- count++;
trans = gsi_channel_poll_one(channel);
if (!trans)
break;
gsi_trans_complete(trans);
}
- if (count < budget) {
- napi_complete(&channel->napi);
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
- }
+ if (count < budget && napi_complete(napi))
+ gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
return count;
}
@@ -1616,7 +1650,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
- bool success;
+ bool timeout;
u32 val;
/* The error global interrupt type is always enabled (until we
@@ -1639,12 +1673,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- if (success)
+ if (!timeout)
return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
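The gsi_channel_poll() rewrite above lands on the canonical NAPI shape: re-arm the interrupt only when less than the full budget was consumed and napi_complete()/napi_complete_done() confirms the instance actually left polling state. Skeleton form, with hypothetical helpers:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = process_rx(napi, budget);        /* hypothetical */

            if (done < budget && napi_complete_done(napi, done))
                    reenable_rx_irq(napi);              /* hypothetical */

            return done;
    }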
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 4d4606b5fa95..3a4ab8a94d82 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -13,6 +13,7 @@
#include "ipa_cmd.h"
+struct page;
struct scatterlist;
struct device;
struct sk_buff;
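The added "struct page;" is the usual forward-declaration trick: a header that only passes pointers around does not need the full definition, and so avoids dragging <linux/mm_types.h> into every includer. For instance:

    struct page;                          /* no definition required here */

    int example_map_page(struct page *p); /* hypothetical prototype */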
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 6c2371084c55..802077631371 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -43,7 +43,7 @@ enum ipa_flag {
* @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
- * @modem_rproc: Remoteproc handle for modem subsystem
+ * @completion: Used to signal pipeline clear transfer complete
* @smp2p: SMP2P information
* @clock: IPA clocking information
* @table_addr: DMA address of filter/route table content
@@ -83,7 +83,7 @@ struct ipa {
DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
- struct rproc *modem_rproc;
+ struct completion completion;
struct notifier_block nb;
void *notifier;
struct ipa_smp2p *smp2p;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 9dcf16f399b7..354675a643db 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -31,142 +31,154 @@
*/
/**
+ * struct ipa_interconnect - IPA interconnect information
+ * @path: Interconnect path
+ * @average_bandwidth: Average interconnect bandwidth (KB/second)
+ * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
+ */
+struct ipa_interconnect {
+ struct icc_path *path;
+ u32 average_bandwidth;
+ u32 peak_bandwidth;
+};
+
+/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
* @mutex: Protects clock enable/disable
* @core: IPA core clock
- * @memory_path: Memory interconnect
- * @imem_path: Internal memory interconnect
- * @config_path: Configuration space interconnect
- * @interconnect_data: Interconnect configuration data
+ * @interconnect_count: Number of elements in interconnect[]
+ * @interconnect: Interconnect array
*/
struct ipa_clock {
refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
- struct icc_path *memory_path;
- struct icc_path *imem_path;
- struct icc_path *config_path;
- const struct ipa_interconnect_data *interconnect_data;
+ u32 interconnect_count;
+ struct ipa_interconnect *interconnect;
};
-static struct icc_path *
-ipa_interconnect_init_one(struct device *dev, const char *name)
+static int ipa_interconnect_init_one(struct device *dev,
+ struct ipa_interconnect *interconnect,
+ const struct ipa_interconnect_data *data)
{
struct icc_path *path;
- path = of_icc_get(dev, name);
- if (IS_ERR(path))
- dev_err(dev, "error %ld getting %s interconnect\n",
- PTR_ERR(path), name);
+ path = of_icc_get(dev, data->name);
+ if (IS_ERR(path)) {
+ int ret = PTR_ERR(path);
+
+ dev_err(dev, "error %d getting %s interconnect\n", ret,
+ data->name);
- return path;
+ return ret;
+ }
+
+ interconnect->path = path;
+ interconnect->average_bandwidth = data->average_bandwidth;
+ interconnect->peak_bandwidth = data->peak_bandwidth;
+
+ return 0;
}
-/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
- struct icc_path *path;
-
- path = ipa_interconnect_init_one(dev, "memory");
- if (IS_ERR(path))
- goto err_return;
- clock->memory_path = path;
+ icc_put(interconnect->path);
+ memset(interconnect, 0, sizeof(*interconnect));
+}
- path = ipa_interconnect_init_one(dev, "imem");
- if (IS_ERR(path))
- goto err_memory_path_put;
- clock->imem_path = path;
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev,
+ const struct ipa_interconnect_data *data)
+{
+ struct ipa_interconnect *interconnect;
+ u32 count;
+ int ret;
- path = ipa_interconnect_init_one(dev, "config");
- if (IS_ERR(path))
- goto err_imem_path_put;
- clock->config_path = path;
+ count = clock->interconnect_count;
+ interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
+ if (!interconnect)
+ return -ENOMEM;
+ clock->interconnect = interconnect;
+
+ while (count--) {
+ ret = ipa_interconnect_init_one(dev, interconnect, data++);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_put:
- icc_put(clock->imem_path);
-err_memory_path_put:
- icc_put(clock->memory_path);
-err_return:
- return PTR_ERR(path);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
+
+ return ret;
}
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
- icc_put(clock->config_path);
- icc_put(clock->imem_path);
- icc_put(clock->memory_path);
+ struct ipa_interconnect *interconnect;
+
+ interconnect = clock->interconnect + clock->interconnect_count;
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
}
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
int ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- return ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_memory_path_disable;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_imem_path_disable;
+ u32 i;
+
+ interconnect = clock->interconnect;
+ for (i = 0; i < clock->interconnect_count; i++) {
+ ret = icc_set_bw(interconnect->path,
+ interconnect->average_bandwidth,
+ interconnect->peak_bandwidth);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_disable:
- (void)icc_set_bw(clock->imem_path, 0, 0);
-err_memory_path_disable:
- (void)icc_set_bw(clock->memory_path, 0, 0);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ (void)icc_set_bw(interconnect->path, 0, 0);
return ret;
}
/* To disable an interconnect, we just set its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+static void ipa_interconnect_disable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
+ int result = 0;
+ u32 count;
int ret;
- ret = icc_set_bw(clock->memory_path, 0, 0);
- if (ret)
- return ret;
-
- ret = icc_set_bw(clock->imem_path, 0, 0);
- if (ret)
- goto err_memory_path_reenable;
-
- ret = icc_set_bw(clock->config_path, 0, 0);
- if (ret)
- goto err_imem_path_reenable;
-
- return 0;
-
-err_imem_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- (void)icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
-err_memory_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- (void)icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
+ count = clock->interconnect_count;
+ interconnect = clock->interconnect + count;
+ while (count--) {
+ interconnect--;
+ ret = icc_set_bw(interconnect->path, 0, 0);
+ if (ret && !result)
+ result = ret;
+ }
- return ret;
+ if (result)
+ dev_err(&ipa->pdev->dev,
+ "error %d disabling IPA interconnects\n", ret);
}
/* Turn on IPA clocks, including interconnects */
@@ -189,7 +201,7 @@ static int ipa_clock_enable(struct ipa *ipa)
static void ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
- (void)ipa_interconnect_disable(ipa);
+ ipa_interconnect_disable(ipa);
}
/* Get an IPA clock reference, but only if the reference count is
@@ -286,9 +298,9 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
goto err_clk_put;
}
clock->core = clk;
- clock->interconnect_data = data->interconnect;
+ clock->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(clock, dev);
+ ret = ipa_interconnect_init(clock, dev, data->interconnect_data);
if (ret)
goto err_kfree;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 002e51448510..97b50fee6008 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -529,7 +529,7 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
direction, opcode);
}
-static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
@@ -543,14 +543,14 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
- payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);
+ payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
}
/* Issue a small command TX data transfer */
-static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
+static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum dma_data_direction direction = DMA_TO_DEVICE;
@@ -558,8 +558,6 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
- /* assert(size <= sizeof(*payload)); */
-
/* Just transfer a zero-filled payload structure */
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
@@ -567,34 +565,53 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
direction, opcode);
}
-void ipa_cmd_tag_process_add(struct gsi_trans *trans)
+/* Add immediate commands to a transaction to clear the hardware pipeline */
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct ipa_endpoint *endpoint;
- endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ /* This will complete when the transfer is received */
+ reinit_completion(&ipa->completion);
+ /* Issue a no-op register write command (mask 0 means no write) */
ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+
+ /* Send a data packet through the IPA pipeline. The packet_init
+ * command says to send the next packet directly to the exception
+ * endpoint without any other IPA processing. The tag_status
+ * command requests that status be generated on completion of
+ * that transfer, and that it will be tagged with a value.
+ * Finally, the transfer command sends a small packet of data
+ * (instead of a command) using the command endpoint.
+ */
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
- ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
- ipa_cmd_transfer_add(trans, 4);
+ ipa_cmd_ip_tag_status_add(trans);
+ ipa_cmd_transfer_add(trans);
}
-/* Returns the number of commands required for the tag process */
-u32 ipa_cmd_tag_process_count(void)
+/* Returns the number of commands required to clear the pipeline */
+u32 ipa_cmd_pipeline_clear_count(void)
{
return 4;
}
-void ipa_cmd_tag_process(struct ipa *ipa)
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
+{
+ wait_for_completion(&ipa->completion);
+}
+
+void ipa_cmd_pipeline_clear(struct ipa *ipa)
{
- u32 count = ipa_cmd_tag_process_count();
+ u32 count = ipa_cmd_pipeline_clear_count();
struct gsi_trans *trans;
trans = ipa_cmd_trans_alloc(ipa, count);
if (trans) {
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
} else {
dev_err(&ipa->pdev->dev,
"error allocating %u entry tag transaction\n", count);
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 4ed09c486abc..6dd3d35cf315 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -157,26 +157,30 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
u16 size, dma_addr_t addr, bool toward_ipa);
/**
- * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction
+ * ipa_cmd_pipeline_clear_add() - Add pipeline clear commands to a transaction
* @trans: GSI transaction
*/
-void ipa_cmd_tag_process_add(struct gsi_trans *trans);
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans);
/**
- * ipa_cmd_tag_process_add_count() - Number of commands in a tag process
+ * ipa_cmd_pipeline_clear_count() - # commands required to clear pipeline
*
* Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * to hold commands to clear the pipeline
*/
-u32 ipa_cmd_tag_process_count(void);
+u32 ipa_cmd_pipeline_clear_count(void);
/**
- * ipa_cmd_tag_process() - Perform a tag process
- *
- * @Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * ipa_cmd_pipeline_clear_wait() - Wait for pipeline clear to complete
+ * @ipa: IPA pointer
+ */
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
+
+/**
+ * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
+ * @ipa: IPA pointer
*/
-void ipa_cmd_tag_process(struct ipa *ipa);
+void ipa_cmd_pipeline_clear(struct ipa *ipa);
/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 5cc0ed77edb9..997b51ceb7d7 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -309,24 +309,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 465000, /* 465 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 68570, /* 68.570 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 30000, /* 30 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 465000, /* 465 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 68570, /* 68.570 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 30000, /* 30 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SC7180 SoC. */
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index f8fee8d3ca42..88c9c3562ab7 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -329,24 +329,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 350000, /* 350 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 40000, /* 40 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 600000, /* 600 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 350000, /* 350 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 40000, /* 40 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SDM845 SoC. */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 0ed5ffe2b8da..b476fc373f7f 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -258,32 +258,28 @@ struct ipa_mem_data {
u32 smem_size;
};
-/** enum ipa_interconnect_id - IPA interconnect identifier */
-enum ipa_interconnect_id {
- IPA_INTERCONNECT_MEMORY,
- IPA_INTERCONNECT_IMEM,
- IPA_INTERCONNECT_CONFIG,
- IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */
-};
-
/**
- * struct ipa_interconnect_data - description of IPA interconnect rates
- * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units)
- * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units)
+ * struct ipa_interconnect_data - description of IPA interconnect bandwidths
+ * @name: Interconnect name (matches interconnect-name in DT)
+ * @peak_bandwidth: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_bandwidth: Average interconnect bandwidth (in 1000 byte/sec units)
*/
struct ipa_interconnect_data {
- u32 peak_rate;
- u32 average_rate;
+ const char *name;
+ u32 peak_bandwidth;
+ u32 average_bandwidth;
};
/**
* struct ipa_clock_data - description of IPA clock and interconnect rates
* @core_clock_rate: Core clock rate (Hz)
- * @interconnect: Array of interconnect bandwidth parameters
+ * @interconnect_count: Number of entries in the interconnect_data array
+ * @interconnect_data: IPA interconnect configuration data
*/
struct ipa_clock_data {
u32 core_clock_rate;
- struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT];
+ u32 interconnect_count; /* # entries in interconnect_data[] */
+ const struct ipa_interconnect_data *interconnect_data;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9f4be9812a1f..8313220d41e7 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -69,8 +69,11 @@ struct ipa_status {
};
/* Field masks for struct ipa_status structure fields */
+#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
+#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
+#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
#ifdef IPA_VALIDATE
@@ -399,7 +402,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
* That won't happen, and we could be more precise, but this is fine
* for now. We need to end the transaction with a "tag process."
*/
- count = hweight32(initialized) + ipa_cmd_tag_process_count();
+ count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -428,11 +431,13 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
}
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
/* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
+
return 0;
}
@@ -1172,11 +1177,45 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
return false; /* Don't skip this packet, process it */
}
+static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
+{
+ struct ipa_endpoint *command_endpoint;
+ struct ipa *ipa = endpoint->ipa;
+ u32 endpoint_id;
+
+ if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
+ return false; /* No valid tag */
+
+ /* The status contains a valid tag. We know the packet was sent to
+ * this endpoint (already verified by ipa_endpoint_status_skip()).
+ * If the packet came from the AP->command TX endpoint we know
+ * this packet was sent as part of the pipeline clear process.
+ */
+ endpoint_id = u8_get_bits(status->endp_src_idx,
+ IPA_STATUS_SRC_IDX_FMASK);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (endpoint_id == command_endpoint->endpoint_id) {
+ complete(&ipa->completion);
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "unexpected tagged packet from endpoint %u\n",
+ endpoint_id);
+ }
+
+ return true;
+}
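
The complete() call above is the producer side of the handshake that ipa_cmd_pipeline_clear_wait() blocks on. A minimal sketch of the completion pattern (the demo_* names are hypothetical; the kernel completion API calls are real):

#include <linux/completion.h>

struct demo {
	struct completion done;
};

static void demo_setup(struct demo *d)
{
	init_completion(&d->done);	/* once, e.g. at probe time */
}

static void demo_issue_and_wait(struct demo *d)
{
	reinit_completion(&d->done);	/* re-arm before each request */
	/* ... submit work whose handler will call demo_handler() ... */
	wait_for_completion(&d->done);	/* sleep until signaled */
}

static void demo_handler(struct demo *d)
{
	complete(&d->done);		/* wake the waiter */
}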
+
/* Return whether the status indicates the packet should be dropped */
-static bool ipa_status_drop_packet(const struct ipa_status *status)
+static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
{
u32 val;
+ /* If the status indicates a tagged transfer, we'll drop the packet */
+ if (ipa_endpoint_status_tag(endpoint, status))
+ return true;
+
/* Deaggregation exceptions we drop; all other types we consume */
if (status->exception)
return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
@@ -1213,12 +1252,11 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
continue;
}
- /* Compute the amount of buffer space consumed by the
- * packet, including the status element. If the hardware
- * is configured to pad packet data to an aligned boundary,
- * account for that. And if checksum offload is is enabled
- * a trailer containing computed checksum information will
- * be appended.
+ /* Compute the amount of buffer space consumed by the packet,
+ * including the status element. If the hardware is configured
+ * to pad packet data to an aligned boundary, account for that.
+ * And if checksum offload is enabled a trailer containing
+ * computed checksum information will be appended.
*/
align = endpoint->data->rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
@@ -1226,16 +1264,21 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
if (endpoint->data->checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
- /* Charge the new packet with a proportional fraction of
- * the unused space in the original receive buffer.
- * XXX Charge a proportion of the *whole* receive buffer?
- */
- if (!ipa_status_drop_packet(status)) {
- u32 extra = unused * len / total_len;
- void *data2 = data + sizeof(*status);
- u32 len2 = le16_to_cpu(status->pkt_len);
+ if (!ipa_endpoint_status_drop(endpoint, status)) {
+ void *data2;
+ u32 extra;
+ u32 len2;
/* Client receives only packet data (no status) */
+ data2 = data + sizeof(*status);
+ len2 = le16_to_cpu(status->pkt_len);
+
+ /* Have the true size reflect the extra unused space in
+ * the original receive buffer. Distribute the "cost"
+ * proportionately across all aggregated packets in the
+ * buffer.
+ */
+ extra = DIV_ROUND_CLOSEST(unused * len, total_len);
ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
}
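
As a worked example (numbers assumed for illustration): if an aggregated receive buffer holds total_len = 7192 bytes of packet data with unused = 1000 bytes of slack, a packet occupying len = 1798 bytes is charged extra = DIV_ROUND_CLOSEST(1000 * 1798, 7192) = 250 bytes, so the slack is apportioned to each skb's truesize in proportion to the buffer space that packet consumed.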
@@ -1378,7 +1421,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
do {
if (!ipa_endpoint_aggr_active(endpoint))
break;
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
} while (retries--);
/* Check one last time */
@@ -1399,7 +1442,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
*/
gsi_channel_reset(gsi, endpoint->channel_id, true);
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
goto out_suspend_again;
@@ -1564,7 +1607,7 @@ void ipa_endpoint_suspend(struct ipa *ipa)
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
- ipa_cmd_tag_process(ipa);
+ ipa_cmd_pipeline_clear(ipa);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 84bb8ae92725..c10e7340b031 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
-#include <linux/remoteproc.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
@@ -729,19 +728,6 @@ static const struct of_device_id ipa_match[] = {
};
MODULE_DEVICE_TABLE(of, ipa_match);
-static phandle of_property_read_phandle(const struct device_node *np,
- const char *name)
-{
- struct property *prop;
- int len = 0;
-
- prop = of_find_property(np, name, &len);
- if (!prop || len != sizeof(__be32))
- return 0;
-
- return be32_to_cpup(prop->value);
-}
-
/* Check things that can be validated at build time. This just
* groups these things so BUILD_BUG_ON() calls don't clutter the rest
* of the code.
@@ -807,10 +793,8 @@ static int ipa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct ipa_data *data;
struct ipa_clock *clock;
- struct rproc *rproc;
bool modem_init;
struct ipa *ipa;
- phandle ph;
int ret;
ipa_validate_build();
@@ -829,25 +813,12 @@ static int ipa_probe(struct platform_device *pdev)
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
- /* We rely on remoteproc to tell us about modem state changes */
- ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
- if (!ph) {
- dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
- return -EINVAL;
- }
-
- rproc = rproc_get_by_phandle(ph);
- if (!rproc)
- return -EPROBE_DEFER;
-
/* The clock and interconnects might not be ready when we're
* probed, so might return -EPROBE_DEFER.
*/
clock = ipa_clock_init(dev, data->clock_data);
- if (IS_ERR(clock)) {
- ret = PTR_ERR(clock);
- goto err_rproc_put;
- }
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -858,9 +829,9 @@ static int ipa_probe(struct platform_device *pdev)
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
- ipa->modem_rproc = rproc;
ipa->clock = clock;
ipa->version = data->version;
+ init_completion(&ipa->completion);
ret = ipa_reg_init(ipa);
if (ret)
@@ -935,8 +906,6 @@ err_kfree_ipa:
kfree(ipa);
err_clock_exit:
ipa_clock_exit(clock);
-err_rproc_put:
- rproc_put(rproc);
return ret;
}
@@ -944,7 +913,6 @@ err_rproc_put:
static int ipa_remove(struct platform_device *pdev)
{
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock;
int ret;
@@ -970,7 +938,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_reg_exit(ipa);
kfree(ipa);
ipa_clock_exit(clock);
- rproc_put(rproc);
return 0;
}
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index e34fe2d77324..9b08eb823984 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa)
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8801d093135c..6cd50106e611 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -651,8 +651,7 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Should not reach here */
- WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
- port->mode);
+ WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
kfree_skb(skb);
return NET_XMIT_DROP;
@@ -749,8 +748,7 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
}
/* Should not reach here */
- WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
- port->mode);
+ WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fb51329f8964..9a9a5cf36a4b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1385,7 +1385,7 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
return ret;
}
- if (!data || !data[IFLA_MACVLAN_MACADDR_DATA])
+ if (!data[IFLA_MACVLAN_MACADDR_DATA])
return 0;
head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275c8e73..d3915f831854 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
return dev_addr;
}
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
mdiobb_get_bit(ctrl);
return ret;
}
+EXPORT_SYMBOL(mdiobb_read);
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
mdiobb_get_bit(ctrl);
return 0;
}
+EXPORT_SYMBOL(mdiobb_write);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index fa41d8c42f05..a5a214d29849 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -122,7 +122,7 @@ static const struct net_device_ops mhi_netdev_ops = {
static void mhi_net_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
- ndev->type = ARPHRD_NONE; /* QMAP... */
+ ndev->type = ARPHRD_RAWIP;
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -158,7 +158,18 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
- skb->protocol = htons(ETH_P_MAP);
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
skb_put(skb, mhi_res->bytes_xferd);
netif_rx(skb);
}
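
The switch above infers the protocol from the IP version nibble of the first payload byte; the same check as a standalone helper (a sketch, not part of the driver):

#include <linux/if_ether.h>	/* ETH_P_IP, ETH_P_IPV6, ETH_P_MAP */

static __be16 guess_protocol(const u8 *data)
{
	switch (data[0] & 0xf0) {
	case 0x40:			/* IPv4: version nibble is 4 */
		return htons(ETH_P_IP);
	case 0x60:			/* IPv6: version nibble is 6 */
		return htons(ETH_P_IPV6);
	default:			/* anything else: raw QMAP */
		return htons(ETH_P_MAP);
	}
}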
@@ -237,6 +248,10 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
+static struct device_type wwan_type = {
+ .name = "wwan",
+};
+
static int mhi_net_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
@@ -256,6 +271,7 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
mhi_netdev->ndev = ndev;
mhi_netdev->mdev = mhi_dev;
SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+ SET_NETDEV_DEVTYPE(ndev, &wwan_type);
/* All MHI net channels have 128 ring elements (at least for now) */
mhi_netdev->rx_queue_sz = 128;
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 45d8a7790bd5..f140bbca98c5 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -860,7 +860,7 @@ static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
nexthop = kzalloc(sizeof(*nexthop), GFP_KERNEL);
if (!nexthop)
- return NULL;
+ return ERR_PTR(-ENOMEM);
nexthop->id = info->id;
@@ -868,15 +868,20 @@ static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
* occupy.
*/
- if (!info->is_grp) {
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
occ = 1;
- goto out;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ for (i = 0; i < info->nh_grp->num_nh; i++)
+ occ += info->nh_grp->nh_entries[i].weight;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
+ kfree(nexthop);
+ return ERR_PTR(-EOPNOTSUPP);
}
- for (i = 0; i < info->nh_grp->num_nh; i++)
- occ += info->nh_grp->nh_entries[i].weight;
-
-out:
nexthop->occ = occ;
return nexthop;
}
@@ -972,8 +977,8 @@ static int nsim_nexthop_insert(struct nsim_fib_data *data,
int err;
nexthop = nsim_nexthop_create(data, info);
- if (!nexthop)
- return -ENOMEM;
+ if (IS_ERR(nexthop))
+ return PTR_ERR(nexthop);
nexthop_old = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
nsim_nexthop_ht_params);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 7178468302c8..aec92440eef1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -258,8 +258,6 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_devlink_port = nsim_get_devlink_port,
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index d0b36fd6c265..d67bddc111e3 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -132,6 +132,11 @@
#define AT803X_MIN_DOWNSHIFT 2
#define AT803X_MAX_DOWNSHIFT 9
+#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
+#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
+#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
+#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
+
#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
@@ -146,8 +151,11 @@ MODULE_LICENSE("GPL");
struct at803x_priv {
int flags;
#define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */
+#define AT803X_DISABLE_SMARTEEE BIT(1)
u16 clk_25m_reg;
u16 clk_25m_mask;
+ u8 smarteee_lpi_tw_1g;
+ u8 smarteee_lpi_tw_100m;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
struct regulator *vddio;
@@ -411,13 +419,32 @@ static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
struct at803x_priv *priv = phydev->priv;
- u32 freq, strength;
+ u32 freq, strength, tw;
unsigned int sel;
int ret;
if (!IS_ENABLED(CONFIG_OF_MDIO))
return 0;
+ if (of_property_read_bool(node, "qca,disable-smarteee"))
+ priv->flags |= AT803X_DISABLE_SMARTEEE;
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_1g = tw;
+ }
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_100m = tw;
+ }
+
ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
if (!ret) {
switch (freq) {
@@ -526,22 +553,47 @@ static void at803x_remove(struct phy_device *phydev)
regulator_disable(priv->vddio);
}
-static int at803x_clk_out_config(struct phy_device *phydev)
+static int at803x_smarteee_config(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
- int val;
+ u16 mask = 0, val = 0;
+ int ret;
- if (!priv->clk_25m_mask)
+ if (priv->flags & AT803X_DISABLE_SMARTEEE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+
+ if (priv->smarteee_lpi_tw_1g) {
+ mask |= 0xff00;
+ val |= priv->smarteee_lpi_tw_1g << 8;
+ }
+ if (priv->smarteee_lpi_tw_100m) {
+ mask |= 0x00ff;
+ val |= priv->smarteee_lpi_tw_100m;
+ }
+ if (!mask)
return 0;
- val = phy_read_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M);
- if (val < 0)
- return val;
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+ mask, val);
+ if (ret)
+ return ret;
- val &= ~priv->clk_25m_mask;
- val |= priv->clk_25m_reg;
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+}
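
As a worked example (device-tree values assumed for illustration): qca,smarteee-tw-us-1g = 24 and qca,smarteee-tw-us-100m = 20 yield mask = 0xffff and val = (24 << 8) | 20 = 0x1814, so a single phy_modify_mmd() call updates both Tw timers in AT803X_MMD3_SMARTEEE_CTL1; the shift arithmetic implies the 1G timer lives in bits 15:8 and the 100M timer in bits 7:0 of that register.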
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
- return phy_write_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M, val);
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+ priv->clk_25m_mask, priv->clk_25m_reg);
}
static int at8031_pll_config(struct phy_device *phydev)
@@ -584,6 +636,10 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = at803x_smarteee_config(phydev);
+ if (ret < 0)
+ return ret;
+
ret = at803x_clk_out_config(phydev);
if (ret < 0)
return ret;
@@ -594,7 +650,13 @@ static int at803x_config_init(struct phy_device *phydev)
return ret;
}
- return 0;
+ /* Ar803x extended next page bit is enabled by default. Cisco
+ * multigig switches read this bit and attempt to negotiate 10Gbps
+ * rates even if the next page bit is disabled. This is incorrect
+ * behaviour but we still need to accommodate it. XNP is only needed
+ * for 10Gbps support, so disable XNP.
+ */
+ return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
}
static int at803x_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 15812001b3ff..e79297a4bae8 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -612,6 +612,7 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM72116, "Broadcom BCM72116"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -633,6 +634,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
+ { PHY_ID_BCM72116, 0xfffffff0, },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2b42e46066b4..040509b81f02 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -740,7 +740,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+ lockdep_assert_held_once(&bus->mdio_lock);
retval = bus->read(bus, addr, regnum);
@@ -766,7 +766,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+ lockdep_assert_held_once(&bus->mdio_lock);
err = bus->write(bus, addr, regnum, val);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 54e0d75203da..39c7c786a912 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1389,7 +1389,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
- .name = "Micrel KSZ886X Switch",
+ .name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 5a8c8eb18582..46160baaafe3 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -19,8 +19,6 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
-#define DEBUG
-
/* DP83865 phy identifier values */
#define DP83865_PHY_ID 0x20005c7a
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 45f75533c47c..9cb7e4dbf8f4 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -724,7 +724,7 @@ static int phy_check_link_status(struct phy_device *phydev)
{
int err;
- WARN_ON(!mutex_is_locked(&phydev->lock));
+ lockdep_assert_held(&phydev->lock);
/* Keep previous state if loopback is enabled because some PHYs
* report that Link is Down when loopback is enabled.
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 80c2e646c093..8447e56ba572 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1740,7 +1740,7 @@ int __phy_resume(struct phy_device *phydev)
struct phy_driver *phydrv = phydev->drv;
int ret;
- WARN_ON(!mutex_is_locked(&phydev->lock));
+ lockdep_assert_held(&phydev->lock);
if (!phydrv || !phydrv->resume)
return 0;
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 99ecd6c4c15a..821e85a97367 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -60,6 +60,9 @@
#define RTL_LPADV_5000FULL BIT(6)
#define RTL_LPADV_2500FULL BIT(5)
+#define RTL9000A_GINMR 0x14
+#define RTL9000A_GINMR_LINK_STATUS BIT(4)
+
#define RTLGEN_SPEED_MASK 0x0630
#define RTL_GENERIC_PHYID 0x001cc800
@@ -655,6 +658,122 @@ static int rtlgen_resume(struct phy_device *phydev)
return ret;
}
+static int rtl9000a_config_init(struct phy_device *phydev)
+{
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static int rtl9000a_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+ u16 ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+ if (ret == 1)
+ ret = genphy_soft_reset(phydev);
+
+ return ret;
+}
+
+static int rtl9000a_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ ret = phy_read(phydev, MII_CTRL1000);
+ if (ret < 0)
+ return ret;
+ if (ret & CTL1000_AS_MASTER)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+
+ ret = phy_read(phydev, MII_STAT1000);
+ if (ret < 0)
+ return ret;
+ if (ret & LPA_1000MSRES)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
+ else
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ return 0;
+}
+
+static int rtl9000a_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, RTL8211F_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int rtl9000a_config_intr(struct phy_device *phydev)
+{
+ u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl9000a_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ val = (u16)~RTL9000A_GINMR_LINK_STATUS;
+ err = phy_write_paged(phydev, 0xa42, RTL9000A_GINMR, val);
+ } else {
+ val = ~0;
+ err = phy_write_paged(phydev, 0xa42, RTL9000A_GINMR, val);
+ if (err)
+ return err;
+
+ err = rtl9000a_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t rtl9000a_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, RTL8211F_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8211F_INER_LINK_STATUS))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
@@ -823,6 +942,19 @@ static struct phy_driver realtek_drvs[] = {
.handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001ccb00),
+ .name = "RTL9000AA_RTL9000AN Ethernet",
+ .features = PHY_BASIC_T1_FEATURES,
+ .config_init = rtl9000a_config_init,
+ .config_aneg = rtl9000a_config_aneg,
+ .read_status = rtl9000a_read_status,
+ .config_intr = rtl9000a_config_intr,
+ .handle_interrupt = rtl9000a_handle_interrupt,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
},
};
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 20b91f5dfc6e..3cfd773ae5f4 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -44,6 +44,17 @@ static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
phylink_set(modes, 2500baseX_Full);
}
+static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
+ unsigned long *modes)
+{
+ /* The Ubiquiti U-Fiber Instant module claims to support all transceiver
+ * types including 10G Ethernet, which is not true. So clear all claimed
+ * modes and set only the one mode the module supports: 1000baseX_Full.
+ */
+ phylink_zero(modes);
+ phylink_set(modes, 1000baseX_Full);
+}
+
static const struct sfp_quirk sfp_quirks[] = {
{
// Alcatel Lucent G-010S-P can operate at 2500base-X, but
@@ -63,6 +74,10 @@ static const struct sfp_quirk sfp_quirks[] = {
.vendor = "HUAWEI",
.part = "MA5671A",
.modes = sfp_quirk_2500basex,
+ }, {
+ .vendor = "UBNT",
+ .part = "UF-INSTANT",
+ .modes = sfp_quirk_ubnt_uf_instant,
},
};
@@ -265,6 +280,12 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
+ /* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */
+ if (id->base.e100_base_fx || id->base.e100_base_lx)
+ phylink_set(modes, 100baseFX_Full);
+ if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100)
+ phylink_set(modes, 100baseFX_Full);
+
/* For active or passive cables, select the link modes
* based on the bit rates and the cable compliance bytes.
*/
@@ -337,11 +358,16 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
* the bitrate to determine supported modes. Some BiDi modules (eg,
* 1310nm/1550nm) are not 1000BASE-BX compliant due to the differing
* wavelengths, so do not set any transceiver bits.
+ *
+ * Do the same for modules supporting 2500BASE-X. Note that some
+ * modules use 2500Mbaud rather than 3100 or 3200Mbaud for
+ * 2500BASE-X, so we allow some slack here.
*/
- if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
- /* If the bit rate allows 1000baseX */
- if (br_nom && br_min <= 1300 && br_max >= 1200)
+ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) {
+ if (br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
+ if (br_min <= 3200 && br_max >= 2500)
+ phylink_set(modes, 2500baseX_Full);
}
if (bus->sfp_quirk)
@@ -384,6 +410,9 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
if (phylink_test(link_modes, 1000baseX_Full))
return PHY_INTERFACE_MODE_1000BASEX;
+ if (phylink_test(link_modes, 100baseFX_Full))
+ return PHY_INTERFACE_MODE_100BASEX;
+
dev_warn(bus->sfp_dev, "Unable to ascertain link mode\n");
return PHY_INTERFACE_MODE_NA;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 91d74c1a920a..7998acc689b7 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/ctype.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/hwmon.h>
@@ -258,6 +259,9 @@ struct sfp {
char *hwmon_name;
#endif
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dir;
+#endif
};
static bool sff_module_supported(const struct sfp_eeprom_id *id)
@@ -273,8 +277,21 @@ static const struct sff_data sff_data = {
static bool sfp_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFF8024_ID_SFP &&
- id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+ if (id->base.phys_id == SFF8024_ID_SFP &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP)
+ return true;
+
+ /* The Ubiquiti U-Fiber Instant SFP GPON module stores phys id SFF
+ * instead of SFP in its EEPROM. Therefore explicitly mark this module
+ * as supported based on a vendor name and part number match.
+ */
+ if (id->base.phys_id == SFF8024_ID_SFF_8472 &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP &&
+ !memcmp(id->base.vendor_name, "UBNT ", 16) &&
+ !memcmp(id->base.vendor_pn, "UF-INSTANT ", 16))
+ return true;
+
+ return false;
}
static const struct sff_data sfp_data = {
@@ -336,19 +353,11 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
size_t len)
{
struct i2c_msg msgs[2];
- size_t block_size;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ size_t block_size = sfp->i2c_block_size;
size_t this_len;
- u8 bus_addr;
int ret;
- if (a2) {
- block_size = 16;
- bus_addr = 0x51;
- } else {
- block_size = sfp->i2c_block_size;
- bus_addr = 0x50;
- }
-
msgs[0].addr = bus_addr;
msgs[0].flags = 0;
msgs[0].len = 1;
@@ -1282,6 +1291,20 @@ static void sfp_hwmon_probe(struct work_struct *work)
struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
int err, i;
+ /* The hwmon interface needs to access 16-bit registers atomically to
+ * guarantee coherency of the diagnostic monitoring data. If coherency
+ * cannot be guaranteed because the EEPROM is broken in a way that does
+ * not support atomic 16-bit reads, we have to skip registration of the
+ * hwmon device.
+ */
+ if (sfp->i2c_block_size < 2) {
+ dev_info(sfp->dev,
+ "skipping hwmon device registration due to broken EEPROM\n");
+ dev_info(sfp->dev,
+ "diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
+ return;
+ }
+
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
if (err < 0) {
if (sfp->hwmon_tries--) {
@@ -1390,6 +1413,54 @@ static void sfp_module_tx_enable(struct sfp *sfp)
sfp_set_state(sfp, sfp->state);
}
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int sfp_debug_state_show(struct seq_file *s, void *data)
+{
+ struct sfp *sfp = s->private;
+
+ seq_printf(s, "Module state: %s\n",
+ mod_state_to_str(sfp->sm_mod_state));
+ seq_printf(s, "Module probe attempts: %d %d\n",
+ R_PROBE_RETRY_INIT - sfp->sm_mod_tries_init,
+ R_PROBE_RETRY_SLOW - sfp->sm_mod_tries);
+ seq_printf(s, "Device state: %s\n",
+ dev_state_to_str(sfp->sm_dev_state));
+ seq_printf(s, "Main state: %s\n",
+ sm_state_to_str(sfp->sm_state));
+ seq_printf(s, "Fault recovery remaining retries: %d\n",
+ sfp->sm_fault_retries);
+ seq_printf(s, "PHY probe remaining retries: %d\n",
+ sfp->sm_phy_retries);
+ seq_printf(s, "moddef0: %d\n", !!(sfp->state & SFP_F_PRESENT));
+ seq_printf(s, "rx_los: %d\n", !!(sfp->state & SFP_F_LOS));
+ seq_printf(s, "tx_fault: %d\n", !!(sfp->state & SFP_F_TX_FAULT));
+ seq_printf(s, "tx_disable: %d\n", !!(sfp->state & SFP_F_TX_DISABLE));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(sfp_debug_state);
+
+static void sfp_debugfs_init(struct sfp *sfp)
+{
+ sfp->debugfs_dir = debugfs_create_dir(dev_name(sfp->dev), NULL);
+
+ debugfs_create_file("state", 0600, sfp->debugfs_dir, sfp,
+ &sfp_debug_state_fops);
+}
+
+static void sfp_debugfs_exit(struct sfp *sfp)
+{
+ debugfs_remove_recursive(sfp->debugfs_dir);
+}
+#else
+static void sfp_debugfs_init(struct sfp *sfp)
+{
+}
+
+static void sfp_debugfs_exit(struct sfp *sfp)
+{
+}
+#endif
+
static void sfp_module_tx_fault_reset(struct sfp *sfp)
{
unsigned int state = sfp->state;
@@ -1482,15 +1553,19 @@ static void sfp_sm_link_down(struct sfp *sfp)
static void sfp_sm_link_check_los(struct sfp *sfp)
{
- unsigned int los = sfp->state & SFP_F_LOS;
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+ bool los = false;
/* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
- * are set, we assume that no LOS signal is available.
+ * are set, we assume that no LOS signal is available. If both are
+ * set, we assume LOS is not implemented (and is meaningless.)
*/
- if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
- los ^= SFP_F_LOS;
- else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
- los = 0;
+ if (los_options == los_inverted)
+ los = !(sfp->state & SFP_F_LOS);
+ else if (los_options == los_normal)
+ los = !!(sfp->state & SFP_F_LOS);
if (los)
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -1500,18 +1575,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
{
- return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
- event == SFP_E_LOS_LOW) ||
- (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
- event == SFP_E_LOS_HIGH);
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+ return (los_options == los_inverted && event == SFP_E_LOS_LOW) ||
+ (los_options == los_normal && event == SFP_E_LOS_HIGH);
}
static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
{
- return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
- event == SFP_E_LOS_HIGH) ||
- (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
- event == SFP_E_LOS_LOW);
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+ return (los_options == los_inverted && event == SFP_E_LOS_HIGH) ||
+ (los_options == los_normal && event == SFP_E_LOS_LOW);
}
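
The net effect of the three rewritten functions: if only SFP_OPTIONS_LOS_INVERTED is set, the LOS input is treated as active-low (SFP_E_LOS_LOW asserts loss of signal, SFP_E_LOS_HIGH clears it); if only SFP_OPTIONS_LOS_NORMAL is set it is active-high (the events are reversed); and if neither or both bits are set, los_options matches neither constant, so LOS is ignored entirely.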
static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
@@ -1642,26 +1721,30 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
return 0;
}
-/* Some modules (Nokia 3FE46541AA) lock up if byte 0x51 is read as a
- * single read. Switch back to reading 16 byte blocks unless we have
- * a CarlitoxxPro module (rebranded VSOL V2801F). Even more annoyingly,
- * some VSOL V2801F have the vendor name changed to OEM.
+/* GPON modules based on Realtek RTL8672 and RTL9601C chips (e.g. V-SOL
+ * V2801F, CarlitoxxPro CPGOS03-0490, Ubiquiti U-Fiber Instant, ...) do
+ * not support multibyte reads from the EEPROM. Each multi-byte read
+ * operation returns just one byte of EEPROM data followed by zeros.
+ * There is no way to identify which modules use the Realtek RTL8672
+ * and RTL9601C chips. Moreover, every OEM of the V-SOL V2801F module
+ * puts its own vendor name and vendor id into the EEPROM, so there is
+ * no way even to detect a V-SOL V2801F. Therefore check for those
+ * zeros in the read data and, based on that check, switch to reading
+ * the EEPROM one byte at a time.
*/
-static int sfp_quirk_i2c_block_size(const struct sfp_eeprom_base *base)
+static bool sfp_id_needs_byte_io(struct sfp *sfp, void *buf, size_t len)
{
- if (!memcmp(base->vendor_name, "VSOL ", 16))
- return 1;
- if (!memcmp(base->vendor_name, "OEM ", 16) &&
- !memcmp(base->vendor_pn, "V2801F ", 16))
- return 1;
+ size_t i, block_size = sfp->i2c_block_size;
- /* Some modules can't cope with long reads */
- return 16;
-}
+ /* Already using byte IO */
+ if (block_size == 1)
+ return false;
-static void sfp_quirks_base(struct sfp *sfp, const struct sfp_eeprom_base *base)
-{
- sfp->i2c_block_size = sfp_quirk_i2c_block_size(base);
+ for (i = 1; i < len; i += block_size) {
+ if (memchr_inv(buf + i, '\0', min(block_size - 1, len - i)))
+ return false;
+ }
+ return true;
}
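
As a worked example: id.base is 64 bytes, so with the default block_size of 16 it is fetched in four 16-byte chunks. On an affected Realtek-based module each chunk contains one genuine byte followed by 15 zeros, so memchr_inv() finds no non-zero byte at offsets 1-15 within each chunk and the function returns true; a healthy module with real data at those offsets makes it return false.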
static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
@@ -1705,11 +1788,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
- /* Some modules (CarlitoxxPro CPGOS03-0490) do not support multibyte
- * reads from the EEPROM, so start by reading the base identifying
- * information one byte at a time.
+ /* Some SFP modules and also some Linux I2C drivers do not like reads
+ * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
+ * a time.
*/
- sfp->i2c_block_size = 1;
+ sfp->i2c_block_size = 16;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@@ -1723,6 +1806,33 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
return -EAGAIN;
}
+ /* Some SFP modules (e.g. Nokia 3FE46541AA) lock up if address 0x51
+ * is read just one byte at a time. Also, SFF-8472 requires that the
+ * EEPROM support atomic 16-bit read operations for the diagnostic
+ * fields, so do not switch to reading one byte at a time unless it
+ * is really required and we have no other option.
+ */
+ if (sfp_id_needs_byte_io(sfp, &id.base, sizeof(id.base))) {
+ dev_info(sfp->dev,
+ "Detected broken RTL8672/RTL9601C emulated EEPROM\n");
+ dev_info(sfp->dev,
+ "Switching to reading EEPROM to one byte at a time\n");
+ sfp->i2c_block_size = 1;
+
+ ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
+ if (ret < 0) {
+ if (report)
+ dev_err(sfp->dev, "failed to read EEPROM: %d\n",
+ ret);
+ return -EAGAIN;
+ }
+
+ if (ret != sizeof(id.base)) {
+ dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ return -EAGAIN;
+ }
+ }
+
/* Cotsworks do not seem to update the checksums when they
* do the final programming with the final module part number,
* serial number and date code.
@@ -1757,9 +1867,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
}
}
- /* Apply any early module-specific quirks */
- sfp_quirks_base(sfp, &id.base);
-
ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext));
if (ret < 0) {
if (report)
@@ -2483,6 +2590,8 @@ static int sfp_probe(struct platform_device *pdev)
if (!sfp->sfp_bus)
return -ENOMEM;
+ sfp_debugfs_init(sfp);
+
return 0;
}
@@ -2490,6 +2599,7 @@ static int sfp_remove(struct platform_device *pdev)
{
struct sfp *sfp = platform_get_drvdata(pdev);
+ sfp_debugfs_exit(sfp);
sfp_unregister_socket(sfp->sfp_bus);
rtnl_lock();
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 33372756a451..ddb78fb4d6dc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -317,7 +317,8 @@ static int smsc_phy_probe(struct phy_device *phydev)
/* Make clk optional to keep DTB backward compatibility. */
priv->refclk = clk_get_optional(dev, NULL);
if (IS_ERR(priv->refclk))
- dev_err_probe(dev, PTR_ERR(priv->refclk), "Failed to request clock\n");
+ return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ "Failed to request clock\n");
ret = clk_prepare_enable(priv->refclk);
if (ret)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 09c27f7773f9..d445ecb1d0c7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -623,6 +623,7 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pch->upl);
return -EALREADY;
}
+ refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
write_unlock_bh(&pch->upl);
@@ -632,19 +633,24 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pchb->upl);
goto err_unset;
}
+ refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
write_unlock_bh(&pchb->upl);
- refcount_inc(&pch->file.refcnt);
- refcount_inc(&pchb->file.refcnt);
-
return 0;
err_unset:
write_lock_bh(&pch->upl);
+ /* Re-read pch->bridge with upl held in case it was modified concurrently */
+ pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
write_unlock_bh(&pch->upl);
synchronize_rcu();
+
+ if (pchb)
+ if (refcount_dec_and_test(&pchb->file.refcnt))
+ ppp_destroy_channel(pchb);
+
return -EALREADY;
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index ee5058445d06..0fe78826c8fa 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -278,10 +278,8 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
header = (struct pptp_gre_header *)(skb->data);
/* ack in different place if S = 0 */
- ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;
-
- ack = ntohl(ack);
-
+ ack = GRE_IS_SEQ(header->gre_hd.flags) ? ntohl(header->ack) :
+ ntohl(header->seq);
if (ack > opt->ack_recv)
opt->ack_recv = ack;
/* also handle sequence number wrap-around */
@@ -355,7 +353,7 @@ static int pptp_rcv(struct sk_buff *skb)
/* if invalid, discard this packet */
goto drop;
- po = lookup_chan(htons(header->call_id), iph->saddr);
+ po = lookup_chan(ntohs(header->call_id), iph->saddr);
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
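
A note on the lookup_chan() change: htons() and ntohs() perform the same byte swap, so the generated code is unchanged; the fix is about conversion direction. header->call_id is __be16 while lookup_chan() takes a host-order u16, so ntohs() is the annotation-correct conversion that static checkers such as sparse accept.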
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 1f4bdd94407a..ff4aa35979a1 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -713,8 +713,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_probe_transport_header(skb);
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
+ if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
@@ -722,12 +721,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = msg_control;
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(uarg, false);
+ uarg->callback(NULL, uarg, false);
}
if (tap) {
@@ -1166,8 +1163,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
}
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
+ if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
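
Both tap hunks above replace an open-coded pair of protocol comparisons with eth_type_vlan(); the helper (include/linux/if_vlan.h) is equivalent to:

static inline bool eth_type_vlan(__be16 ethertype)
{
	switch (ethertype) {
	case htons(ETH_P_8021Q):	/* 802.1Q C-tag */
	case htons(ETH_P_8021AD):	/* 802.1ad S-tag */
		return true;
	default:
		return false;
	}
}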
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c19dac21c468..dd7917cab2b1 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -992,7 +992,8 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
@@ -1006,6 +1007,7 @@ static void __team_compute_features(struct team *team)
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
+ rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team)
static void team_compute_features(struct team *team)
{
- mutex_lock(&team->lock);
__team_compute_features(team);
- mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbed05ae7b0f..62690baa19bc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);
@@ -1599,12 +1599,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
struct xdp_buff xdp;
u32 act;
- xdp.data_hard_start = buf;
- xdp.data = buf + pad;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + len;
- xdp.rxq = &tfile->xdp_rxq;
- xdp.frame_sz = buflen;
+ xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf, pad, len, false);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
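
For reference, the two helpers used above (declared in include/net/xdp.h) set the same fields the removed open-coded assignments did; roughly:

	/* xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq) is roughly: */
	xdp.frame_sz = buflen;
	xdp.rxq = &tfile->xdp_rxq;

	/* xdp_prepare_buff(&xdp, buf, pad, len, false) is roughly: */
	xdp.data_hard_start = buf;
	xdp.data = buf + pad;
	xdp.data_end = xdp.data + len;
	xdp_set_data_meta_invalid(&xdp);	/* meta_valid == false */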
@@ -1814,12 +1810,10 @@ drop:
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = msg_control;
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(uarg, false);
+ uarg->callback(NULL, uarg, false);
}
skb_reset_network_header(skb);
@@ -2344,9 +2338,9 @@ static int tun_xdp_one(struct tun_struct *tun,
skb_xdp = true;
goto build;
}
+
+ xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
xdp_set_data_meta_invalid(xdp);
- xdp->rxq = &tfile->xdp_rxq;
- xdp->frame_sz = buflen;
act = bpf_prog_run_xdp(xdp_prog, xdp);
err = tun_xdp_act(tun, xdp_prog, xdp, act);
@@ -2741,7 +2735,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
- /* free_netdev() won't check refcnt, to aovid race
+ /* free_netdev() won't check refcnt, to avoid race
* with dev_put() we need publish tun after registration.
*/
rcu_assign_pointer(tfile->tun, tun);
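The tun hunks replace field-by-field xdp_buff setup with the new initializers; their definitions in include/net/xdp.h are approximately:

	static inline void
	xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
	{
		xdp->frame_sz = frame_sz;
		xdp->rxq = rxq;
	}

	static inline void
	xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
			 int headroom, int data_len, const bool meta_valid)
	{
		unsigned char *data = hard_start + headroom;

		xdp->data_hard_start = hard_start;
		xdp->data = data;
		xdp->data_end = data + data_len;
		xdp->data_meta = meta_valid ? data : data + 1;
	}

Passing meta_valid = false reproduces the old xdp_set_data_meta_invalid() call (data_meta pointing past data marks metadata as unsupported), which is why tun_build_skb() no longer invokes it explicitly.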
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1e3719028780..fbbe78643631 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -631,7 +631,6 @@ config USB_NET_AQC111
config USB_RTL8153_ECM
tristate "RTL8153 ECM support"
depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
- default y
help
This option supports ECM mode for RTL8153 ethernet adapter, when
CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8c1d61c2cbac..a9b551028659 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -793,6 +793,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
@@ -962,6 +969,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2bac57d5e8d5..291e76d32abe 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
@@ -1823,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+ /* if the speed hasn't changed, don't report it.
+ * RTL8156 shipped before 2021 sends a notification about every 32ms.
+ */
+ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+ return;
+
+ dev->rx_speed = rx_speed;
+ dev->tx_speed = tx_speed;
+
/*
* Currently the USB-NET API does not support reporting the actual
* device speed. Do print it instead.
@@ -1863,10 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- netif_info(dev, link, dev->net,
- "network connection: %sconnected\n",
- !!event->wValue ? "" : "dis");
- usbnet_link_change(dev, !!event->wValue, 0);
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
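The old reservation, ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus), under-counts whenever the datagram alignment (tx_modulus plus tx_remainder) demands more padding than the NDP alignment itself; the replacement reserves the NDP size plus worst-case padding for the larger of the two constraints. With hypothetical values:

	u32 max_ndp_size   = 32;
	u32 tx_ndp_modulus = 4;
	u32 tx_modulus     = 64;
	u32 tx_remainder   = 4;

	/* old: ALIGN(32, 4) == 32, leaving no slack for datagram padding */
	/* new: 32 + max(4, 64 + 4) - 1 == 99 bytes reserved */
	u32 delayed_ndp_size = max_ndp_size +
			       max_t(u32,
				     tx_ndp_modulus,
				     tx_modulus + tx_remainder) - 1;

The adjacent WARN_ON() then keeps a wrapped padding_count (skb_out->len having somehow exceeded tx_curr_size) from zero-filling far past the frame buffer.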
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 2bb28db89432..ef6dd012b8c4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -370,7 +370,7 @@ static struct usb_driver hso_driver;
static struct tty_driver *tty_drv;
static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS];
static struct hso_device *network_table[HSO_MAX_NET_DEVICES];
-static spinlock_t serial_table_lock;
+static DEFINE_SPINLOCK(serial_table_lock);
static const s32 default_port_spec[] = {
HSO_INTF_MUX | HSO_PORT_NETWORK,
@@ -3236,7 +3236,6 @@ static int __init hso_init(void)
pr_info("%s\n", version);
/* Initialise the serial table semaphore and table */
- spin_lock_init(&serial_table_lock);
for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++)
serial_table[i] = NULL;
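Here, and again in the ixp4xx_hss hunk further down, a file-scope lock moves from runtime spin_lock_init() to compile-time initialization, removing any window in which the lock could be taken before it is initialized:

	/* compile-time initialization: valid from module load onward */
	static DEFINE_SPINLOCK(example_lock);

	/* the removed two-step form, where init must provably run first */
	static spinlock_t legacy_lock;

	static int __init example_init(void)
	{
		spin_lock_init(&legacy_lock);
		return 0;
	}

For a static lock there is no reason to prefer the two-step form.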
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d166c321ee9b..c8b2b60d2183 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -57,6 +57,7 @@ struct qmi_wwan_state {
enum qmi_wwan_flags {
QMI_WWAN_FLAG_RAWIP = 1 << 0,
QMI_WWAN_FLAG_MUX = 1 << 1,
+ QMI_WWAN_FLAG_PASS_THROUGH = 1 << 2,
};
enum qmi_wwan_quirks {
@@ -186,7 +187,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
net = qmimux_find_dev(dev, hdr->mux_id);
if (!net)
goto skip;
- skbn = netdev_alloc_skb(net, pkt_len);
+ skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
if (!skbn)
return 0;
skbn->dev = net;
@@ -203,6 +204,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
goto skip;
}
+ skb_reserve(skbn, LL_MAX_HEADER);
skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);
if (netif_rx(skbn) != NET_RX_SUCCESS) {
net->stats.rx_errors++;
@@ -217,6 +219,28 @@ skip:
return 1;
}
+static ssize_t mux_id_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct qmimux_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ return sysfs_emit(buf, "0x%02x\n", priv->mux_id);
+}
+
+static DEVICE_ATTR_RO(mux_id);
+
+static struct attribute *qmi_wwan_sysfs_qmimux_attrs[] = {
+ &dev_attr_mux_id.attr,
+ NULL,
+};
+
+static struct attribute_group qmi_wwan_sysfs_qmimux_attr_group = {
+ .name = "qmap",
+ .attrs = qmi_wwan_sysfs_qmimux_attrs,
+};
+
static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
{
struct net_device *new_dev;
@@ -239,6 +263,8 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
goto out_free_newdev;
}
+ new_dev->sysfs_groups[0] = &qmi_wwan_sysfs_qmimux_attr_group;
+
err = register_netdevice(new_dev);
if (err < 0)
goto out_free_newdev;
@@ -325,6 +351,13 @@ static ssize_t raw_ip_store(struct device *d, struct device_attribute *attr, co
if (enable == (info->flags & QMI_WWAN_FLAG_RAWIP))
return len;
+ /* ip mode cannot be cleared when pass through mode is set */
+ if (!enable && (info->flags & QMI_WWAN_FLAG_PASS_THROUGH)) {
+ netdev_err(dev->net,
+ "Cannot clear ip mode on pass through device\n");
+ return -EINVAL;
+ }
+
if (!rtnl_trylock())
return restart_syscall();
@@ -455,14 +488,59 @@ err:
return ret;
}
+static ssize_t pass_through_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct qmi_wwan_state *info;
+
+ info = (void *)&dev->data;
+ return sprintf(buf, "%c\n",
+ info->flags & QMI_WWAN_FLAG_PASS_THROUGH ? 'Y' : 'N');
+}
+
+static ssize_t pass_through_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct qmi_wwan_state *info;
+ bool enable;
+
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ info = (void *)&dev->data;
+
+ /* no change? */
+ if (enable == (info->flags & QMI_WWAN_FLAG_PASS_THROUGH))
+ return len;
+
+ /* pass through mode can be set for raw ip devices only */
+ if (!(info->flags & QMI_WWAN_FLAG_RAWIP)) {
+ netdev_err(dev->net,
+ "Cannot set pass through mode on non ip device\n");
+ return -EINVAL;
+ }
+
+ if (enable)
+ info->flags |= QMI_WWAN_FLAG_PASS_THROUGH;
+ else
+ info->flags &= ~QMI_WWAN_FLAG_PASS_THROUGH;
+
+ return len;
+}
+
static DEVICE_ATTR_RW(raw_ip);
static DEVICE_ATTR_RW(add_mux);
static DEVICE_ATTR_RW(del_mux);
+static DEVICE_ATTR_RW(pass_through);
static struct attribute *qmi_wwan_sysfs_attrs[] = {
&dev_attr_raw_ip.attr,
&dev_attr_add_mux.attr,
&dev_attr_del_mux.attr,
+ &dev_attr_pass_through.attr,
NULL,
};
@@ -509,6 +587,11 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (info->flags & QMI_WWAN_FLAG_MUX)
return qmimux_rx_fixup(dev, skb);
+ if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
+ skb->protocol = htons(ETH_P_MAP);
+ return (netif_rx(skb) == NET_RX_SUCCESS);
+ }
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
@@ -1013,6 +1096,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
@@ -1301,6 +1385,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
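Two independent improvements sit in the qmi_wwan section. The mux receive path now over-allocates each demuxed skb by LL_MAX_HEADER and reserves that headroom up front, so consumers that later push link-layer headers do not force a reallocation per packet:

	/* sketch of the adjusted allocation from qmimux_rx_fixup() */
	skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
	if (!skbn)
		return 0;
	skb_reserve(skbn, LL_MAX_HEADER);	/* headroom for later pushes */
	skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);

Separately, each qmimux child now reports its mux ID through the new "qmap/mux_id" sysfs attribute, and the pass_through attribute delivers raw ETH_P_MAP frames straight to the stack. Note the enforced ordering between the two flags: pass-through can only be set on a raw-IP device, and raw-IP mode cannot be cleared while pass-through is active.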
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c448d6089821..67cd6986634f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -6877,6 +6877,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
index 2c3fabd38b16..20b2df8d74ae 100644
--- a/drivers/net/usb/r8153_ecm.c
+++ b/drivers/net/usb/r8153_ecm.c
@@ -122,12 +122,20 @@ static const struct driver_info r8153_info = {
};
static const struct usb_device_id products[] = {
+/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
{
USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&r8153_info,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
+
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 6609d21ef894..f813ca9dec53 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
reply_len = sizeof *phym;
retval = rndis_query(dev, intf, u.buf,
RNDIS_OID_GEN_PHYSICAL_MEDIUM,
- 0, (void **) &phym, &reply_len);
+ reply_len, (void **)&phym, &reply_len);
if (retval != 0 || !phym) {
/* OID is optional so don't fail here. */
phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1447da1d5729..b4c8080e6f87 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1539,11 +1539,11 @@ static void usbnet_bh (struct timer_list *t)
}
}
-static void usbnet_bh_tasklet(unsigned long data)
+static void usbnet_bh_tasklet(struct tasklet_struct *t)
{
- struct timer_list *t = (struct timer_list *)data;
+ struct usbnet *dev = from_tasklet(dev, t, bh);
- usbnet_bh(t);
+ usbnet_bh(&dev->delay);
}
@@ -1673,8 +1673,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = usbnet_bh_tasklet;
- dev->bh.data = (unsigned long)&dev->delay;
+ tasklet_setup(&dev->bh, usbnet_bh_tasklet);
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
timer_setup(&dev->delay, usbnet_bh, 0);
@@ -1964,12 +1963,12 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, buf, size,
USB_CTRL_GET_TIMEOUT);
if (err > 0 && err <= size) {
- if (data)
- memcpy(data, buf, err);
- else
- netdev_dbg(dev->net,
- "Huh? Data requested but thrown away.\n");
- }
+ if (data)
+ memcpy(data, buf, err);
+ else
+ netdev_dbg(dev->net,
+ "Huh? Data requested but thrown away.\n");
+ }
kfree(buf);
out:
return err;
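The usbnet conversion replaces manual .func/.data assignment with the callback-style tasklet API. A minimal sketch, with a hypothetical structure standing in for struct usbnet and example_bh() standing in for usbnet_bh():

	struct example_dev {
		struct tasklet_struct bh;
		struct timer_list delay;
	};

	static void example_bh_tasklet(struct tasklet_struct *t)
	{
		/* from_tasklet() is container_of() keyed on the member name */
		struct example_dev *dev = from_tasklet(dev, t, bh);

		example_bh(&dev->delay);
	}

	/* at probe time, instead of assigning .func and .data by hand: */
	tasklet_setup(&dev->bh, example_bh_tasklet);

This drops the unsigned-long cast of a pointer and lets the callback recover its containing object type-safely.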
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 02bfcdf50a7a..99caae7d1641 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -654,7 +654,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
- u32 pktlen, headroom, act, metalen;
+ u32 pktlen, headroom, act, metalen, frame_sz;
void *orig_data, *orig_data_end;
struct bpf_prog *xdp_prog;
int mac_len, delta, off;
@@ -710,15 +710,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
skb = nskb;
}
- xdp.data_hard_start = skb->head;
- xdp.data = skb_mac_header(skb);
- xdp.data_end = xdp.data + pktlen;
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
-
/* SKB "head" area always has tailroom for skb_shared_info */
- xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
- xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frame_sz = skb_end_pointer(skb) - skb->head;
+ frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
orig_data = xdp.data;
orig_data_end = xdp.data_end;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4c41df624dbb..ba8e63792549 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -689,12 +689,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
page = xdp_page;
}
- xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
- xdp.data = xdp.data_hard_start + xdp_headroom;
- xdp.data_end = xdp.data + len;
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = buflen;
+ xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
+ xdp_headroom, len, true);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -859,12 +856,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* the descriptor on if we get an XDP_TX return code.
*/
data = page_address(xdp_page) + offset;
- xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
- xdp.data = data + vi->hdr_len;
- xdp.data_end = xdp.data + (len - vi->hdr_len);
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = frame_sz - vi->hdr_len;
+ xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
+ VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -2093,14 +2087,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
- if (!err) {
- netif_set_real_num_tx_queues(dev, queue_pairs);
- netif_set_real_num_rx_queues(dev, queue_pairs);
-
- virtnet_set_affinity(vi);
+ if (err) {
+ put_online_cpus();
+ goto err;
}
+ virtnet_set_affinity(vi);
put_online_cpus();
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
return err;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a8ad710629e6..3929e437382b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3283,12 +3283,13 @@ static void vxlan_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->vlan_features = dev->features;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
@@ -4521,17 +4522,12 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
- if (event == NETDEV_UNREGISTER) {
- if (!dev->udp_tunnel_nic_info)
- vxlan_offload_rx_ports(dev, false);
+ if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
- } else if (event == NETDEV_REGISTER) {
- if (!dev->udp_tunnel_nic_info)
- vxlan_offload_rx_ports(dev, true);
- } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
- event == NETDEV_UDP_TUNNEL_DROP_INFO) {
- vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
- }
+ else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
+ vxlan_offload_rx_ports(dev, true);
+ else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
+ vxlan_offload_rx_ports(dev, false);
return NOTIFY_DONE;
}
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4029fde71a9e..83c9481995dd 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -282,6 +282,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 64f855651336..261b53fc8e04 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
+ /* mod_timer could be called after we entered this function but
+ * before we got the lock.
+ */
+ if (timer_pending(&proto->timer)) {
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ return;
+ }
switch (proto->state) {
case STOPPING:
case REQ_SENT:
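The new check closes a classic timer race: the handler starts running, another CPU calls mod_timer() under ppp->lock and releases it, and only then does the handler acquire the lock, by which point its view of the state machine is stale. The guard pattern, with declarations elided:

	spin_lock_irqsave(&ppp->lock, flags);
	/* a mod_timer() that slipped in between expiry and lock
	 * acquisition means this run is stale; the rearmed timer
	 * will fire again with fresh state */
	if (timer_pending(&proto->timer)) {
		spin_unlock_irqrestore(&ppp->lock, flags);
		return;
	}
	/* ... run the state machine knowing no newer expiry is queued ... */
	spin_unlock_irqrestore(&ppp->lock, flags);

timer_pending() is true exactly when the timer is queued for a future expiry, which can only happen here if someone rearmed it after this invocation was already scheduled.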
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 7c5cf77e9ef1..ecea09fd21cb 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -323,7 +323,7 @@ struct desc {
static int ports_open;
static struct dma_pool *dma_pool;
-static spinlock_t npe_lock;
+static DEFINE_SPINLOCK(npe_lock);
static const struct {
int tx, txdone, rx, rxfree;
@@ -1402,8 +1402,6 @@ static int __init hss_init_module(void)
(IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
return -ENODEV;
- spin_lock_init(&npe_lock);
-
return platform_driver_register(&ixp4xx_hss_driver);
}
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 2fde439543fb..3092a09d3eaa 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1535,7 +1535,7 @@ sbni_setup( char *p )
goto bad_param;
for( n = 0, parm = 0; *p && n < 8; ) {
- (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
+ (*dest[ parm ])[ n ] = simple_strtoul( p, &p, 0 );
if( !*p || *p == ')' )
return 1;
if( *p == ';' ) {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b97c38b9a270..350b7913622c 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_hif_ce_irq_disable(ab);
ret = ath11k_hif_suspend(ab);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 205c0f1a40e9..920e5026a635 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
{
u8 channel_num;
u32 center_freq;
+ struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
- rx_status->band = ar->rx_channel->band;
- channel_num =
- ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 5c175e3e09b2..c1608f64ea95 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath11k_start_vdev_delay(ar->hw, vif);
if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay) {
+ if (ab->hw_params.vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 857647aa57c8..20b415cd96c4 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
return ret;
}
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
}
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
pci_disable_device(pci_dev);
}
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ /* restore aspm in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_force_wake(ab_pci->ab);
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 0432a702416b..fe44d0dfce19 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
+ ATH11K_PCI_ASPM_RESTORE,
};
struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
+ u16 link_ctl;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1866d82678fa..b69e7ebfa930 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
return NULL;
}
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index bba2e00b6944..8553ed061aea 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index f0b5c50974f3..0db623ff4bb9 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
+ bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+ delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
+ delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
int i;
struct target_mem_chunk *chunk;
+ ab->qmi.target_mem_delayed = false;
+
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
+ if (ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
chunk->size,
chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
ret);
return;
}
- } else if (msg->mem_seg_len > 2) {
+ } else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 92925c9eac67..7bad374cc23a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -125,6 +125,7 @@ struct ath11k_qmi {
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
+ bool target_mem_delayed;
u8 cal_done;
struct target_info target;
struct m3_mem_region m3_mem;
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b876fec7fa1b..e1a1df169034 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -247,7 +247,9 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
}
rtnl_lock();
- ret = regulatory_set_wiphy_regd_sync_rtnl(ar->hw->wiphy, regd_copy);
+ wiphy_lock(ar->hw->wiphy);
+ ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
+ wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
kfree(regd_copy);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index da4b546b62cb..73869d445c5b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 9c83e9a4299b..29527e8dcced 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3648,7 +3648,7 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
kfree(mc_filter);
}
- unregister_netdevice(vif->ndev);
+ cfg80211_unregister_netdevice(vif->ndev);
ar->num_vif--;
}
@@ -3821,7 +3821,7 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
netdev_set_default_ethtool_ops(ndev, &ath6kl_ethtool_ops);
- if (register_netdevice(ndev))
+ if (cfg80211_register_netdevice(ndev))
goto err;
ar->avail_idx_map &= ~BIT(fw_vif_idx);
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index ebb9f163710f..4f0a7a185fc9 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -212,11 +212,13 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
ar->avail_idx_map |= BIT(i);
rtnl_lock();
+ wiphy_lock(ar->wiphy);
/* Add an initial station interface */
wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM,
NL80211_IFTYPE_STATION, 0, INFRA_NETWORK);
+ wiphy_unlock(ar->wiphy);
rtnl_unlock();
if (!wdev) {
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 39bf19686175..9b5c7d8f2b95 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1904,7 +1904,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
spin_unlock_bh(&ar->list_lock);
ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
rtnl_lock();
+ wiphy_lock(ar->wiphy);
ath6kl_cfg80211_vif_cleanup(vif);
+ wiphy_unlock(ar->wiphy);
rtnl_unlock();
spin_lock_bh(&ar->list_lock);
}
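The ath6kl changes, and the wil6210 and brcmfmac hunks that follow, all add the same locking discipline: interface add and remove now take the per-wiphy mutex in addition to the RTNL, in that order, and switch to the cfg80211_* registration wrappers that expect the wiphy mutex held:

	rtnl_lock();
	wiphy_lock(wiphy);
	err = cfg80211_register_netdevice(ndev);	/* wiphy mutex required */
	wiphy_unlock(wiphy);
	rtnl_unlock();

Plain register_netdevice()/unregister_netdevice() remains appropriate only for netdevs that are not tied to a wiphy.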
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 6a95b199bf62..f074e9c31aa2 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -2,6 +2,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
+ select CRC32
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 1c42410d68e1..60bba5b491e0 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2820,7 +2820,9 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
mutex_unlock(&wil->vif_mutex);
if (p2p_wdev) {
+ wiphy_lock(wil->wiphy);
cfg80211_unregister_wdev(p2p_wdev);
+ wiphy_unlock(wil->wiphy);
kfree(p2p_wdev);
}
}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 07b4a252a23c..0913f0bf60e7 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -424,7 +424,7 @@ int wil_vif_add(struct wil6210_priv *wil, struct wil6210_vif *vif)
if (rc)
return rc;
}
- rc = register_netdevice(ndev);
+ rc = cfg80211_register_netdevice(ndev);
if (rc < 0) {
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
if (any_active && vif->mid != 0)
@@ -473,7 +473,9 @@ int wil_if_add(struct wil6210_priv *wil)
wil_update_net_queues_bh(wil, vif, NULL, true);
rtnl_lock();
+ wiphy_lock(wiphy);
rc = wil_vif_add(wil, vif);
+ wiphy_unlock(wiphy);
rtnl_unlock();
if (rc < 0)
goto out_wiphy;
@@ -511,7 +513,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
/* during unregister_netdevice cfg80211_leave may perform operations
* such as stop AP, disconnect, so we only clear the VIF afterwards
*/
- unregister_netdevice(ndev);
+ cfg80211_unregister_netdevice(ndev);
if (any_active && vif->mid != 0)
wmi_port_delete(wil, vif->mid);
@@ -543,15 +545,18 @@ void wil_if_remove(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
wil_dbg_misc(wil, "if_remove\n");
rtnl_lock();
+ wiphy_lock(wiphy);
wil_vif_remove(wil, 0);
+ wiphy_unlock(wiphy);
rtnl_unlock();
netif_napi_del(&wil->napi_tx);
netif_napi_del(&wil->napi_rx);
- wiphy_unregister(wdev->wiphy);
+ wiphy_unregister(wiphy);
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index c174323c5c0b..ce40d94909ad 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -473,8 +473,10 @@ static void wil_pcie_remove(struct pci_dev *pdev)
wil6210_debugfs_remove(wil);
rtnl_lock();
+ wiphy_lock(wil->wiphy);
wil_p2p_wdev_free(wil);
wil_remove_all_additional_vifs(wil);
+ wiphy_unlock(wil->wiphy);
rtnl_unlock();
wil_if_remove(wil);
wil_if_pcie_disable(wil);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 3dd28f5fef19..ea78fe527c5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -633,7 +633,7 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
-int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
+int brcmf_net_attach(struct brcmf_if *ifp, bool locked)
{
struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
@@ -656,8 +656,8 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
- if (rtnl_locked)
- err = register_netdevice(ndev);
+ if (locked)
+ err = cfg80211_register_netdevice(ndev);
else
err = register_netdev(ndev);
if (err != 0) {
@@ -677,11 +677,11 @@ fail:
return -EBADE;
}
-void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
+void brcmf_net_detach(struct net_device *ndev, bool locked)
{
if (ndev->reg_state == NETREG_REGISTERED) {
- if (rtnl_locked)
- unregister_netdevice(ndev);
+ if (locked)
+ cfg80211_unregister_netdevice(ndev);
else
unregister_netdev(ndev);
} else {
@@ -758,7 +758,7 @@ int brcmf_net_mon_attach(struct brcmf_if *ifp)
ndev = ifp->ndev;
ndev->netdev_ops = &brcmf_netdev_ops_mon;
- err = register_netdevice(ndev);
+ err = cfg80211_register_netdevice(ndev);
if (err)
bphy_err(drvr, "Failed to register %s device\n", ndev->name);
@@ -909,7 +909,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
}
static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
- bool rtnl_locked)
+ bool locked)
{
struct brcmf_if *ifp;
int ifidx;
@@ -938,7 +938,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
cancel_work_sync(&ifp->multicast_work);
cancel_work_sync(&ifp->ndoffload_work);
}
- brcmf_net_detach(ifp->ndev, rtnl_locked);
+ brcmf_net_detach(ifp->ndev, locked);
} else {
/* Only p2p device interfaces which get dynamically created
* end up here. In this case the p2p module should be informed
@@ -947,7 +947,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
* serious troublesome side effects. The p2p module will clean
* up the ifp if needed.
*/
- brcmf_p2p_ifp_removed(ifp, rtnl_locked);
+ brcmf_p2p_ifp_removed(ifp, locked);
kfree(ifp);
}
@@ -956,14 +956,14 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
}
-void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
+void brcmf_remove_interface(struct brcmf_if *ifp, bool locked)
{
if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bsscfgidx] != ifp))
return;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx,
ifp->ifidx);
brcmf_proto_del_if(ifp->drvr, ifp);
- brcmf_del_if(ifp->drvr, ifp->bsscfgidx, rtnl_locked);
+ brcmf_del_if(ifp->drvr, ifp->bsscfgidx, locked);
}
static int brcmf_psm_watchdog_notify(struct brcmf_if *ifp,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 5767d665cee5..8212c9de14f1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -201,16 +201,16 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
char *brcmf_ifname(struct brcmf_if *ifp);
struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx);
void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable);
-int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+int brcmf_net_attach(struct brcmf_if *ifp, bool locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
bool is_p2pdev, const char *name, u8 *mac_addr);
-void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked);
+void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
-void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
+void brcmf_net_detach(struct net_device *ndev, bool locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index ec6fc7a150a6..6d30a0fcecea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2430,7 +2430,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
return err;
}
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked)
{
struct brcmf_cfg80211_info *cfg;
struct brcmf_cfg80211_vif *vif;
@@ -2439,11 +2439,15 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- if (!rtnl_locked)
+ if (locked) {
rtnl_lock();
- cfg80211_unregister_wdev(&vif->wdev);
- if (!rtnl_locked)
+ wiphy_lock(cfg->wiphy);
+ cfg80211_unregister_wdev(&vif->wdev);
+ wiphy_unlock(cfg->wiphy);
rtnl_unlock();
+ } else {
+ cfg80211_unregister_wdev(&vif->wdev);
+ }
brcmf_free_vif(vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 7220fc8fd9b0..8280092066e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -314,6 +314,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
@@ -340,6 +341,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_b0_hr_b0 = {
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -366,6 +379,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_c0_hr_b0 = {
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 15248b064380..d8b7776a8dde 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -80,19 +80,45 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
}
/*
- * Evaluate a DSM with no arguments and a single u8 return value (inside a
- * buffer object), verify and return that value.
+ * Generic function to evaluate a DSM with no arguments
+ * and an integer return value (either as an integer object
+ * or inside a buffer object), verify the result and assign
+ * it to the "value" parameter.
+ * Returns 0 on success and an appropriate errno otherwise.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ u64 *value, size_t expected_size)
{
union acpi_object *obj;
- int ret;
+ int ret = 0;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "Failed to get DSM object. func= %d\n",
+ func);
return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *value = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ __le64 le_value = 0;
- if (obj->type != ACPI_TYPE_BUFFER) {
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+ return -EINVAL;
+
+ /* if the buffer size doesn't match the expected size */
+ if (obj->buffer.length != expected_size)
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+ obj->buffer.length);
+
+ /* assuming LE from Intel BIOS spec */
+ memcpy(&le_value, obj->buffer.pointer,
+ min_t(size_t, expected_size, (size_t)obj->buffer.length));
+ *value = le64_to_cpu(le_value);
+ } else {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method did not return a valid object, type=%d\n",
obj->type);
@@ -100,15 +126,6 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
goto out;
}
- if (obj->buffer.length != sizeof(u8)) {
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method returned invalid buffer, length=%d\n",
- obj->buffer.length);
- ret = -EINVAL;
- goto out;
- }
-
- ret = obj->buffer.pointer[0];
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func=%d, ret=%d\n",
func, ret);
@@ -116,6 +133,24 @@ out:
ACPI_FREE(obj);
return ret;
}
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));
+
+ if (ret < 0)
+ return ret;
+
+ /* narrow val (u64) down to u8 */
+ *value = (u8)val;
+ return 0;
+}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 042dd247d387..1cce30d1ef55 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -99,7 +99,7 @@ struct iwl_fw_runtime;
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
@@ -159,7 +159,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static inline
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
{
return -ENOENT;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 6d8f7bff1243..895a907acdf0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -224,40 +224,46 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait)
{
- const struct firmware *pnvm;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
- char pnvm_name[64];
- int ret;
/* if the SKU_ID is empty, there's nothing to do */
if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
return 0;
- /* if we already have it, nothing to do either */
- if (trans->pnvm_loaded)
- return 0;
+ /* load from disk only if we haven't done it (or tried) before */
+ if (!trans->pnvm_loaded) {
+ const struct firmware *pnvm;
+ char pnvm_name[64];
+ int ret;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+ * don't add the dot here...
+ */
+ snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+ trans->cfg->fw_name_pre);
+
+ /* ...but replace the hyphen with the dot here. */
+ if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+ pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
+
+ ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ if (ret) {
+ IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+ pnvm_name, ret);
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->pnvm_loaded = true;
+ } else {
+ iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
- /*
- * The prefix unfortunately includes a hyphen at the end, so
- * don't add the dot here...
- */
- snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
- trans->cfg->fw_name_pre);
-
- /* ...but replace the hyphen with the dot here. */
- if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
- pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
-
- ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
- if (ret) {
- IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
- pnvm_name, ret);
- } else {
- iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
-
- release_firmware(pnvm);
+ release_firmware(pnvm);
+ }
}
iwl_init_notification_wait(notif_wait, &pnvm_wait,
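The relocated name construction is worth tracing once; with a hypothetical firmware prefix it runs as follows:

	const char *fw_name_pre = "iwlwifi-ty-a0-gf-a0-";	/* hypothetical */
	char pnvm_name[64];

	snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm", fw_name_pre);
	/* "iwlwifi-ty-a0-gf-a0-pnvm": the prefix ends in '-' ... */
	if (strlen(fw_name_pre) < sizeof(pnvm_name))
		pnvm_name[strlen(fw_name_pre) - 1] = '.';
	/* ... so the result becomes "iwlwifi-ty-a0-gf-a0.pnvm" */

The behavioral change is in the pnvm_loaded handling: a missing PNVM file is now also recorded as loaded, so restart and resume paths stop re-requesting a file that will never appear.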
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 27cb0406ba9a..86e1d57df65e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __IWL_CONFIG_H__
@@ -445,7 +445,7 @@ struct iwl_cfg {
#define IWL_CFG_CORES_BT_GNSS 0x5
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
@@ -491,6 +491,7 @@ extern const char iwl9260_killer_1550_name[];
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
+extern const char iwl_ax203_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -574,6 +575,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_b0_hr_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
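The IWL_SUBDEVICE_NO_160 fix is pure bit arithmetic: masking bit 8 (0x0100) and then shifting right by 9 always yields 0, so the no-160MHz flag could never be observed; the flag actually lives in bit 9 (0x0200):

	u16 subdevice = 0x0254;			/* hypothetical ID with bit 9 set */

	(u16)((subdevice & 0x0100) >> 9);	/* old: always 0 */
	(u16)((subdevice & 0x0200) >> 9);	/* new: 1, flag extracted */

Devices whose subsystem ID marks them as non-160MHz variants are now matched correctly against the iwl_dev_info table.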
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index a654147d3cd6..a80a35a7740f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -180,13 +180,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
- /* For safe using a string from FW make sure we have a
- * null terminator
- */
- reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
- IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 2ac20d0a30eb..2b7ef1583e7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -150,16 +150,17 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
+ mdelay(delay_ms);
iwl_write_prph_no_grab(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
}
}
-IWL_EXPORT_SYMBOL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
@@ -219,8 +220,8 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
- iwl_write_prph(trans, DEVICE_SET_NMI_REG,
- DEVICE_SET_NMI_VAL_DRV);
+ iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_DRV, 1);
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 39bceee4e9e7..3c21c0e081f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -37,7 +37,13 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
+ u32 val, u32 delay_ms);
+static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_write_prph_delay(trans, ofs, val, 0);
+}
+
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 0b03fdedc1f7..1158e256f601 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -301,6 +301,12 @@
#define RADIO_RSP_ADDR_POS (6)
#define RADIO_RSP_RD_CMD (3)
+/* LTR control (Qu only) */
+#define HPM_MAC_LTR_CSR 0xa0348c
+#define HPM_MAC_LRT_ENABLE_ALL 0xf
+/* also uses CSR_LTR_* for values */
+#define HPM_UMAC_LTR 0xa03480
+
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c1c)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c025188fa9bc..64c10ca0d31e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -2032,8 +2032,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
- clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
-
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
@@ -2143,11 +2141,13 @@ err:
out_iterate:
if (!test)
- ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+ ieee80211_iterate_active_interfaces_mtx(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* no need to reset the device in unified images, if successful */
if (unified_image && !ret) {
/* nothing else to do if we already sent D0I3_END_CMD */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 573e46956c14..38d0bfb649cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -459,7 +459,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
const size_t bufsz = sizeof(buf);
int pos = 0;
+ mutex_lock(&mvm->mutex);
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ mutex_unlock(&mvm->mutex);
+
do_div(curr_os, NSEC_PER_USEC);
diff = curr_os - curr_gp2;
pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
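The read handler then divides the sampled OS time with do_div(), whose in-place semantics are easy to misread: it truncates the u64 operand and returns the remainder. A hedged kernel-style sketch of what the call does (values illustrative):

u64 curr_os = 1234567890;		/* nanoseconds */
u32 rem = do_div(curr_os, 1000);	/* NSEC_PER_USEC */
/* curr_os is now 1234567 (whole microseconds), rem is 890 (leftover ns) */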
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 0637eb1cff4e..313e9f106f46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1090,20 +1090,22 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
{
+ u8 value;
+
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2);
+ DSM_FUNC_ENABLE_INDONESIA_5G2, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_INDONESIA_MAX)
+ else if (value >= DSM_VALUE_INDONESIA_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
- ret);
+ "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+ else if (value == DSM_VALUE_INDONESIA_ENABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
return DSM_VALUE_INDONESIA_ENABLE;
@@ -1114,25 +1116,26 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
{
+ u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_DISABLE_SRD);
+ DSM_FUNC_DISABLE_SRD, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_SRD_MAX)
+ else if (value >= DSM_VALUE_SRD_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function DISABLE_SRD return invalid value, ret=%d\n",
- ret);
+ "DSM function DISABLE_SRD return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_SRD_PASSIVE) {
+ else if (value == DSM_VALUE_SRD_PASSIVE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
return DSM_VALUE_SRD_PASSIVE;
- } else if (ret == DSM_VALUE_SRD_DISABLE) {
+ } else if (value == DSM_VALUE_SRD_DISABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: disabling SRD\n");
return DSM_VALUE_SRD_DISABLE;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index da32937ba9a7..bcbd77ef68df 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -260,7 +260,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
int ret;
bool changed;
const struct ieee80211_regdomain *r =
- rtnl_dereference(mvm->hw->wiphy->regd);
+ wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd);
if (!r)
return -ENOENT;
@@ -282,7 +282,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
/* update cfg80211 if the regdomain was changed */
if (changed)
- ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
else
ret = 0;
@@ -4194,6 +4194,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
iwl_mvm_binding_remove_vif(mvm, vif);
out:
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+ switching_chanctx)
+ return;
mvmvif->phy_ctxt = NULL;
iwl_mvm_power_update_mac(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index abb8c1088c2f..7fb4e618f76e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -545,7 +545,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
}
- retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ retval = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
kfree(regd);
return retval;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 98f62d78cf9c..61618f607927 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -791,6 +791,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!mvm->scan_cmd)
goto out_free;
+ /* invalidate ids to prevent accidental removal of sta_id 0 */
+ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
/* Set EBS as successful as long as not stated otherwise by the FW. */
mvm->last_ebs_successful = true;
@@ -1205,6 +1209,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
kfree(reprobe);
module_put(THIS_MODULE);
}
@@ -1255,7 +1260,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
module_put(THIS_MODULE);
return;
}
- reprobe->dev = mvm->trans->dev;
+ reprobe->dev = get_device(mvm->trans->dev);
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
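The two ops.c hunks above pair a get_device() at scheduling time with a put_device() after the work has run, so the struct device cannot go away while the reprobe is pending. A kernel-flavoured sketch of the rule (not buildable stand-alone; reprobe_ctx is a stand-in for iwl_mvm_reprobe):

struct reprobe_ctx {
	struct device *dev;
	struct work_struct work;
};

static void reprobe_fn(struct work_struct *wk)
{
	struct reprobe_ctx *ctx = container_of(wk, struct reprobe_ctx, work);

	if (device_reprobe(ctx->dev))
		dev_err(ctx->dev, "reprobe failed!\n");
	put_device(ctx->dev);		/* pairs with get_device() below */
	kfree(ctx);
}

static void schedule_reprobe(struct device *dev)
{
	struct reprobe_ctx *ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);

	if (!ctx)
		return;
	ctx->dev = get_device(dev);	/* pin dev until the work runs */
	INIT_WORK(&ctx->work, reprobe_fn);
	schedule_work(&ctx->work);
}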
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index dc174410bf9c..578c353ae02c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2057,6 +2057,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
@@ -2071,6 +2074,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a983c215df31..3712adc3ccc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -773,6 +773,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
else if (next)
@@ -795,6 +796,8 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
if (tcp_payload_len > mss) {
skb_shinfo(tmp)->gso_size = mss;
+ skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
} else {
if (qos) {
u8 *qc;
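Both tx.c hunks restore skb_shinfo()->gso_type next to gso_size after the driver re-segments a TSO skb into A-MSDU subframes; without the type, later GSO accounting treats the segment as a plain packet. The invariant, sketched (seg, mss and ipv4 stand in for the driver's locals):

/* any re-segmented skb still carrying more than one MSS of TCP payload
 * must advertise itself as GSO again, typed to match the IP version */
skb_shinfo(seg)->gso_size = mss;
skb_shinfo(seg)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;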
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 36bf414a388a..5b5134dd49af 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -75,6 +75,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -189,8 +198,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img)
- return -ENOMEM;
+ if (!iml_img) {
+ ret = -ENOMEM;
+ goto err_free_ctxt_info;
+ }
memcpy(iml_img, trans->iml, trans->iml_len);
@@ -206,23 +217,19 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
- /*
- * The firmware initializes this again later (to a smaller
- * value), but for the boot process initialize the LTR to
- * ~250 usec.
- */
- u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+ /*
+ * To work around hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->trans_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (trans->trans_cfg->integrated &&
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
@@ -232,6 +239,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
return 0;
+err_free_ctxt_info:
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev,
sizeof(*prph_info),
@@ -294,6 +306,9 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
return ret;
}
+ if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+ return -EBUSY;
+
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
cpu_to_le64(trans_pcie->pnvm_dram.physical);
prph_sc_ctrl->pnvm_cfg.pnvm_size =
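ltr_val above is assembled with u32_encode_bits(), which shifts a value into the field selected by a mask. A compilable stand-alone demo of that packing; the masks below are invented for the demo, the real layout lives in the CSR_LTR_* definitions:

#include <stdio.h>
#include <stdint.h>

/* same contract as the kernel's u32_encode_bits(): shift the value to
 * the mask's lowest set bit, then confine it to the field */
static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

#define DEMO_REQ	0x80000000u	/* hypothetical "requirement" flag */
#define DEMO_SCALE_MASK	0x1c000000u	/* hypothetical 3-bit scale field */
#define DEMO_VAL_MASK	0x03ff0000u	/* hypothetical 10-bit value field */
#define DEMO_SCALE_USEC	2u

int main(void)
{
	uint32_t ltr = DEMO_REQ |
		       encode_bits(DEMO_SCALE_USEC, DEMO_SCALE_MASK) |
		       encode_bits(250, DEMO_VAL_MASK);	/* ~250 usec */

	printf("ltr = 0x%08x\n", ltr);
	return 0;
}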
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 965982612e74..ed3f5b7aa71e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -910,6 +910,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -917,6 +922,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_c0_hr_b0, iwl_ax203_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 285e0d586021..ab93a848a466 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2107,7 +2107,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
while (offs < dwords) {
/* limit the time we spin here under lock to 1/2s */
- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+ unsigned long end = jiffies + HZ / 2;
+ bool resched = false;
if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR,
@@ -2118,14 +2119,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
HBUS_TARG_MEM_RDAT);
offs++;
- /* calling ktime_get is expensive so
- * do it once in 128 reads
- */
- if (offs % 128 == 0 && ktime_after(ktime_get(),
- timeout))
+ if (time_after(jiffies, end)) {
+ resched = true;
break;
+ }
}
iwl_trans_release_nic_access(trans, &flags);
+
+ if (resched)
+ cond_resched();
} else {
return -EBUSY;
}
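The rework above swaps a ktime check every 128 reads for a cheap jiffies deadline, and yields with cond_resched() only after the NIC-access lock is dropped. The shape of the pattern, as a hedged sketch (work_left(), do_one_unit() and release_lock() are placeholders):

unsigned long end = jiffies + HZ / 2;	/* spend at most ~500 ms per grab */
bool resched = false;

while (work_left()) {
	do_one_unit();
	if (time_after(jiffies, end)) {
		resched = true;		/* deadline hit, back off */
		break;
	}
}
release_lock();
if (resched)
	cond_resched();			/* yield outside the lock */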
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 5dda0015522d..83f4964f3cb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -201,6 +201,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ if (!txq) {
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+ return;
+ }
+
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 27eea909e32d..7ff1bb0ccc9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -142,26 +142,25 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
* idx is bounded by n_window
*/
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
iwl_txq_get_tfd(trans, txq, idx));
- /* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
@@ -841,10 +840,8 @@ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
+ if (!WARN_ON_ONCE(!skb))
+ iwl_txq_free_tso_page(trans, skb);
}
iwl_txq_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
@@ -1494,28 +1491,28 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
*/
int rd_ptr = txq->read_ptr;
int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3b3fc7c9c91d..fa7d4c20dc13 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -311,6 +311,12 @@ static struct net_device *hwsim_mon; /* global monitor netdev */
.hw_value = (_freq), \
}
+#define CHAN6G(_freq) { \
+ .band = NL80211_BAND_6GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_freq), \
+}
+
static const struct ieee80211_channel hwsim_channels_2ghz[] = {
CHAN2G(2412), /* Channel 1 */
CHAN2G(2417), /* Channel 2 */
@@ -377,6 +383,68 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5925), /* Channel 185 */
};
+static const struct ieee80211_channel hwsim_channels_6ghz[] = {
+ CHAN6G(5955), /* Channel 1 */
+ CHAN6G(5975), /* Channel 5 */
+ CHAN6G(5995), /* Channel 9 */
+ CHAN6G(6015), /* Channel 13 */
+ CHAN6G(6035), /* Channel 17 */
+ CHAN6G(6055), /* Channel 21 */
+ CHAN6G(6075), /* Channel 25 */
+ CHAN6G(6095), /* Channel 29 */
+ CHAN6G(6115), /* Channel 33 */
+ CHAN6G(6135), /* Channel 37 */
+ CHAN6G(6155), /* Channel 41 */
+ CHAN6G(6175), /* Channel 45 */
+ CHAN6G(6195), /* Channel 49 */
+ CHAN6G(6215), /* Channel 53 */
+ CHAN6G(6235), /* Channel 57 */
+ CHAN6G(6255), /* Channel 61 */
+ CHAN6G(6275), /* Channel 65 */
+ CHAN6G(6295), /* Channel 69 */
+ CHAN6G(6315), /* Channel 73 */
+ CHAN6G(6335), /* Channel 77 */
+ CHAN6G(6355), /* Channel 81 */
+ CHAN6G(6375), /* Channel 85 */
+ CHAN6G(6395), /* Channel 89 */
+ CHAN6G(6415), /* Channel 93 */
+ CHAN6G(6435), /* Channel 97 */
+ CHAN6G(6455), /* Channel 101 */
+ CHAN6G(6475), /* Channel 105 */
+ CHAN6G(6495), /* Channel 109 */
+ CHAN6G(6515), /* Channel 113 */
+ CHAN6G(6535), /* Channel 117 */
+ CHAN6G(6555), /* Channel 121 */
+ CHAN6G(6575), /* Channel 125 */
+ CHAN6G(6595), /* Channel 129 */
+ CHAN6G(6615), /* Channel 133 */
+ CHAN6G(6635), /* Channel 137 */
+ CHAN6G(6655), /* Channel 141 */
+ CHAN6G(6675), /* Channel 145 */
+ CHAN6G(6695), /* Channel 149 */
+ CHAN6G(6715), /* Channel 153 */
+ CHAN6G(6735), /* Channel 157 */
+ CHAN6G(6755), /* Channel 161 */
+ CHAN6G(6775), /* Channel 165 */
+ CHAN6G(6795), /* Channel 169 */
+ CHAN6G(6815), /* Channel 173 */
+ CHAN6G(6835), /* Channel 177 */
+ CHAN6G(6855), /* Channel 181 */
+ CHAN6G(6875), /* Channel 185 */
+ CHAN6G(6895), /* Channel 189 */
+ CHAN6G(6915), /* Channel 193 */
+ CHAN6G(6935), /* Channel 197 */
+ CHAN6G(6955), /* Channel 201 */
+ CHAN6G(6975), /* Channel 205 */
+ CHAN6G(6995), /* Channel 209 */
+ CHAN6G(7015), /* Channel 213 */
+ CHAN6G(7035), /* Channel 217 */
+ CHAN6G(7055), /* Channel 221 */
+ CHAN6G(7075), /* Channel 225 */
+ CHAN6G(7095), /* Channel 229 */
+ CHAN6G(7115), /* Channel 233 */
+};
+
#define NUM_S1G_CHANS_US 51
static struct ieee80211_channel hwsim_channels_s1g[NUM_S1G_CHANS_US];
@@ -548,6 +616,7 @@ struct mac80211_hwsim_data {
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
+ struct ieee80211_channel channels_6ghz[ARRAY_SIZE(hwsim_channels_6ghz)];
struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_iface_combination if_combination;
@@ -578,7 +647,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel *channel;
unsigned long next_start, start, end;
} survey_data[ARRAY_SIZE(hwsim_channels_2ghz) +
- ARRAY_SIZE(hwsim_channels_5ghz)];
+ ARRAY_SIZE(hwsim_channels_5ghz) +
+ ARRAY_SIZE(hwsim_channels_6ghz)];
struct ieee80211_channel *channel;
u64 beacon_int /* beacon interval in us */;
@@ -3149,6 +3219,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sizeof(hwsim_channels_2ghz));
memcpy(data->channels_5ghz, hwsim_channels_5ghz,
sizeof(hwsim_channels_5ghz));
+ memcpy(data->channels_6ghz, hwsim_channels_6ghz,
+ sizeof(hwsim_channels_6ghz));
memcpy(data->channels_s1g, hwsim_channels_s1g,
sizeof(hwsim_channels_s1g));
memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
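For reference, 6 GHz channel numbering follows center_freq = 5950 + 5 * channel, and the table lists every fourth channel (20 MHz spacing) from 1 to 233; this is also why the 6455 MHz entry is channel 101. A compilable check:

#include <stdio.h>

static int chan6g_from_freq(int mhz)
{
	return (mhz - 5950) / 5;
}

int main(void)
{
	printf("5955 MHz -> channel %d\n", chan6g_from_freq(5955)); /* 1 */
	printf("6455 MHz -> channel %d\n", chan6g_from_freq(6455)); /* 101 */
	printf("7115 MHz -> channel %d\n", chan6g_from_freq(7115)); /* 233 */
	return 0;
}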
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index a6b9dc6700b1..5553df913290 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2097,7 +2097,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
@@ -2366,7 +2366,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
(int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
@@ -2576,7 +2576,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->scan_block = false;
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
if (!user_scan_cfg)
@@ -3081,7 +3081,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mutex_init(&priv->async_mutex);
/* Register network device */
- if (register_netdevice(dev)) {
+ if (cfg80211_register_netdevice(dev)) {
mwifiex_dbg(adapter, ERROR, "cannot register network device\n");
ret = -EFAULT;
goto err_reg_netdev;
@@ -3160,7 +3160,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
netif_carrier_off(priv->netdev);
if (wdev->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(wdev->netdev);
+ cfg80211_unregister_netdevice(wdev->netdev);
if (priv->dfs_cac_workqueue) {
flush_workqueue(priv->dfs_cac_workqueue);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index ee52fb839ef7..529dfd8b7ae8 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -598,12 +598,14 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
}
rtnl_lock();
+ wiphy_lock(adapter->wiphy);
/* Create station interface by default */
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
NL80211_IFTYPE_STATION, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create default STA interface\n");
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
goto err_add_intf;
}
@@ -614,6 +616,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create AP interface\n");
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
goto err_add_intf;
}
@@ -625,10 +628,12 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create p2p client interface\n");
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
goto err_add_intf;
}
}
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -1440,9 +1445,11 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
if (!priv)
continue;
rtnl_lock();
+ wiphy_lock(adapter->wiphy);
if (priv->netdev &&
priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
+ wiphy_unlock(adapter->wiphy);
rtnl_unlock();
}
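All of these mwifiex hunks follow the ordering rule that comes with the per-wiphy mutex: take the RTNL first, then wiphy_lock(), and unwind in reverse order on every path, error exits included. Sketched (add_virtual_intf() is a stand-in, error handling trimmed):

rtnl_lock();
wiphy_lock(wiphy);

wdev = add_virtual_intf(wiphy, ...);
if (IS_ERR(wdev)) {
	wiphy_unlock(wiphy);	/* unwind in reverse acquisition order */
	rtnl_unlock();
	return PTR_ERR(wdev);
}

wiphy_unlock(wiphy);
rtnl_unlock();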
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index a44b7766dec6..c13547841a4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -231,7 +231,7 @@ mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *seq)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- enum mt76_txq_id qid;
+ enum mt76_mcuq_id qid;
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
index 13d77f8fca86..9fb506f2ace6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -83,7 +83,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
{
struct mt76_queue *q = &dev->q_rx[qid];
struct mt76_sdio *sdio = &dev->sdio;
- int len = 0, err, i, order;
+ int len = 0, err, i;
struct page *page;
u8 *buf;
@@ -96,8 +96,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
- order = get_order(len);
- page = __dev_alloc_pages(GFP_KERNEL, order);
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
if (!page)
return -ENOMEM;
@@ -106,7 +105,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
- __free_pages(page, order);
+ put_page(page);
return err;
}
@@ -123,7 +122,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (q->queued + i + 1 == q->ndesc)
break;
}
- __free_pages(page, order);
+ put_page(page);
spin_lock_bh(&q->lock);
q->head = (q->head + i) % q->ndesc;
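__free_pages(page, order) frees by order no matter who else still holds a reference, while put_page() drops only the caller's reference; the latter is the safe choice once parts of the page may have been handed to the network stack. A hedged sketch of the producer side:

struct page *page = __dev_alloc_pages(GFP_KERNEL, get_order(len));

if (!page)
	return -ENOMEM;
/* ... hand sub-buffers to the rx path; each consumer takes its own
 * page reference ... */
put_page(page);	/* drop our ref; memory is freed by the last user */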
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index ed4635bd151a..102a8f14c22d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 16,
- .types = BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, {
.max = MT7915_MAX_INTERFACES,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 5fdd1a6d32ee..e211a2bd4d3c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -256,7 +256,7 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_mcu_txd *mcu_txd;
u8 seq, pkt_fmt, qidx;
- enum mt76_txq_id txq;
+ enum mt76_mcuq_id qid;
__le32 *txd;
u32 val;
@@ -268,18 +268,18 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (cmd == -MCU_CMD_FW_SCATTER) {
- txq = MT_MCUQ_FWDL;
+ qid = MT_MCUQ_FWDL;
goto exit;
}
mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
- txq = MT_MCUQ_WA;
+ qid = MT_MCUQ_WA;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
} else {
- txq = MT_MCUQ_WM;
+ qid = MT_MCUQ_WM;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
}
@@ -326,7 +326,7 @@ exit:
if (wait_seq)
*wait_seq = seq;
- return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 62b5b912818f..0b6facb17ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_queue_entry entry;
int nframes = 0;
+ bool mcu;
+ if (!q)
+ return 0;
+
+ mcu = q == dev->q_mcu[MT_MCUQ_WM];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
nframes++;
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
- if (mcu)
- goto out;
-
- mt76_txq_schedule(&dev->phy, q->qid);
+ if (!mcu)
+ mt76_txq_schedule(&dev->phy, q->qid);
- if (wake)
- ieee80211_wake_queue(dev->hw, q->qid);
-out:
return nframes;
}
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dc850109de22..b95d093728b9 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
struct mt76_queue_entry entry;
struct mt76_queue *q;
- bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->phy.q_tx[i];
+ if (!q)
+ continue;
while (q->queued > 0) {
if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
mt76_queue_tx_complete(dev, q, &entry);
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->usb.stat_work);
- if (wake)
- ieee80211_wake_queue(dev->hw, i);
}
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 5f99054f535b..af7d1ecb777c 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
if (new_p) {
/* we have one extra ref from the allocator */
- __free_pages(e->p, MT_RX_ORDER);
-
+ put_page(e->p);
e->p = new_p;
}
}
@@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
}
e = &q->e[q->end];
- e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
@@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
q->end = (q->end + 1) % q->entries;
q->used++;
+ e->skb = skb;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index e3dd205cbbe5..96973ec7bd9a 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1538,7 +1538,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
wilc_wfi_deinit_mon_interface(wl, true);
vif = netdev_priv(wdev->netdev);
cfg80211_stop_iface(wiphy, wdev, GFP_KERNEL);
- unregister_netdevice(vif->ndev);
+ cfg80211_unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
wilc_set_operation_mode(vif, 0, 0, 0);
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index b5a1b65c087c..6bd63934c2d8 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -233,7 +233,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
wl->monitor_dev->needs_free_netdev = true;
- if (register_netdevice(wl->monitor_dev)) {
+ if (cfg80211_register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
free_netdev(wl->monitor_dev);
return NULL;
@@ -251,7 +251,7 @@ void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked)
return;
if (rtnl_locked)
- unregister_netdevice(wl->monitor_dev);
+ cfg80211_unregister_netdevice(wl->monitor_dev);
else
unregister_netdev(wl->monitor_dev);
wl->monitor_dev = NULL;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 2a1fbbdd6a4b..643cbb155439 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -950,7 +950,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
vif->priv.dev = ndev;
if (rtnl_locked)
- ret = register_netdevice(ndev);
+ ret = cfg80211_register_netdevice(ndev);
else
ret = register_netdev(ndev);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 54cdf3ad09d7..504b4d0b98c4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -180,7 +180,7 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cancel_work_sync(&vif->high_pri_tx_work);
if (netdev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(netdev);
+ cfg80211_unregister_netdevice(netdev);
if (qtnf_cmd_send_del_intf(vif))
pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
@@ -267,7 +267,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret) {
- unregister_netdevice(vif->netdev);
+ cfg80211_unregister_netdevice(vif->netdev);
vif->netdev = NULL;
goto error_del_vif;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index ad726bd100ec..b4dd60b2ebc9 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -492,7 +492,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
- ret = register_netdevice(dev);
+ ret = cfg80211_register_netdevice(dev);
if (ret) {
free_netdev(dev);
vif->netdev = NULL;
@@ -611,8 +611,9 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
mac->wiphy_registered = 1;
rtnl_lock();
-
+ wiphy_lock(priv_to_wiphy(mac));
ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM);
+ wiphy_unlock(priv_to_wiphy(mac));
rtnl_unlock();
if (ret) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a7259dbc953d..965bd9589045 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
release_firmware(firmware);
- return;
+ goto exit;
}
if (!is_wow) {
memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
release_firmware(firmware);
+
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
void rtl_fw_cb(const struct firmware *firmware, void *context)
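The rtlwifi fix moves complete() from the top of the firmware callback to a single exit label, so a waiter can no longer race ahead while the callback is still validating and copying the image. A condensed sketch of that shape (struct priv and its fields are hypothetical):

static void fw_callback(const struct firmware *fw, void *ctx)
{
	struct priv *p = ctx;

	if (!fw)
		goto exit;		/* load failed */
	if (fw->size > p->max_fw_size) {
		release_firmware(fw);
		goto exit;		/* reject an oversized image */
	}
	memcpy(p->fw_buf, fw->data, fw->size);
	release_firmware(fw);
exit:
	complete(&p->loading_done);	/* signal exactly once, at the end */
}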
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8ee24e351bdc..4a16d6e33c09 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -399,7 +399,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+ bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index acb786d8b1d8..08b0e3d0b7eb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,7 +47,7 @@
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
-/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
* increasing the inflight counter. We need to increase the inflight
* counter because core driver calls into xenvif_zerocopy_callback
* which calls xenvif_skb_zerocopy_complete.
@@ -55,7 +55,7 @@
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb)
{
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
atomic_inc(&queue->inflight_packets);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index bc3421d14576..19a27dce79d2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1091,7 +1091,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
- uarg->callback(uarg, true);
+ uarg->callback(NULL, uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
/* Fill the skb with the new (local) frags. */
@@ -1228,7 +1228,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+ bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 6f10e0998f1c..a5439c130130 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -411,7 +411,7 @@ static void read_xenbus_frontend_xdp(struct backend_info *be,
vif->xdp_headroom = headroom;
}
-/**
+/*
* Callback received when the frontend's state changes.
*/
static void frontend_changed(struct xenbus_device *dev,
@@ -996,7 +996,7 @@ static int netback_remove(struct xenbus_device *dev)
return 0;
}
-/**
+/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and switch to InitWait.
*/
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b01848ef4649..6ef2adbd283a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -864,12 +864,10 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
u32 act;
int err;
- xdp->data_hard_start = page_address(pdata);
- xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &queue->xdp_rxq;
- xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+ xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
+ &queue->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
+ len, false);
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
@@ -1582,7 +1580,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return ERR_PTR(err);
}
-/**
+/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffers for communication with the backend, and
* inform the backend of the appropriate details for those.
@@ -1659,7 +1657,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
}
}
-/**
+/*
* We are reconnecting to the backend, due to a suspend/resume, or a backend
* driver restart. We tear down our netif structure and recreate it, but
* leave the device-layer structures intact so that this is transparent to the
@@ -2305,7 +2303,7 @@ static int xennet_connect(struct net_device *dev)
return 0;
}
-/**
+/*
* Callback received when the backend's state changes.
*/
static void netback_changed(struct xenbus_device *dev,
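xdp_init_buff()/xdp_prepare_buff() replace the five open-coded xdp_buff assignments removed above; the helpers take the frame size plus rxq, and the hard start, headroom, data length and metadata flag, respectively. Equivalent setup for one received page, sketched with the driver's own names:

struct xdp_buff xdp;
u32 act;

xdp_init_buff(&xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, &queue->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(pdata),	/* hard_start */
		 XDP_PACKET_HEADROOM,		/* headroom before data */
		 len,				/* payload length */
		 false);			/* no metadata present */
act = bpf_prog_run_xdp(prog, &xdp);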
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 75c65d339018..288c6f1c6979 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -49,6 +49,17 @@ config NFC_PORT100
If unsure, say N.
+config NFC_VIRTUAL_NCI
+ tristate "NCI device simulator driver"
+ depends on NFC_NCI
+ help
+ The NCI virtual device simulates an NCI device to the user.
+ It can be used to validate the NCI module and applications.
+ This driver supports communication between the virtual NCI device and
+ the NCI module.
+
+ If unsure, say N.
+
source "drivers/nfc/fdp/Kconfig"
source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/pn533/Kconfig"
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 5393ba59b17d..7b1bfde1d971 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NFC_ST_NCI) += st-nci/
obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci/
obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5/
obj-$(CONFIG_NFC_ST95HF) += st95hf/
+obj-$(CONFIG_NFC_VIRTUAL_NCI) += virtual_ncidev.o
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index ad0abb1f0bae..adaa1a7147f9 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -155,7 +155,7 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb)
/*
* LRC check failed. This may due to transmission error or
- * desynchronization between driver and FDP. Drop the paquet
+ * desynchronization between driver and FDP. Drop the packet
* and force resynchronization
*/
if (lrc) {
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index c70f62fe321e..33978022ae47 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -169,7 +169,7 @@
/* Bits determining whether its a direct command or register R/W,
* whether to use a continuous SPI transaction or not, and the actual
- * direct cmd opcode or regster address.
+ * direct cmd opcode or register address.
*/
#define TRF7970A_CMD_BIT_CTRL BIT(7)
#define TRF7970A_CMD_BIT_RW BIT(6)
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
new file mode 100644
index 000000000000..f73ee0bf3593
--- /dev/null
+++ b/drivers/nfc/virtual_ncidev.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtual NCI device simulation driver
+ *
+ * Copyright (C) 2020 Samsung Electronics
+ * Bongsu Jeon <bongsu.jeon@samsung.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <net/nfc/nci_core.h>
+
+enum virtual_ncidev_mode {
+ virtual_ncidev_enabled,
+ virtual_ncidev_disabled,
+ virtual_ncidev_disabling,
+};
+
+#define IOCTL_GET_NCIDEV_IDX 0
+#define VIRTUAL_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO14443_B_MASK | \
+ NFC_PROTO_ISO15693_MASK)
+
+static enum virtual_ncidev_mode state;
+static struct miscdevice miscdev;
+static struct sk_buff *send_buff;
+static struct nci_dev *ndev;
+static DEFINE_MUTEX(nci_mutex);
+
+static int virtual_nci_open(struct nci_dev *ndev)
+{
+ return 0;
+}
+
+static int virtual_nci_close(struct nci_dev *ndev)
+{
+ mutex_lock(&nci_mutex);
+ kfree_skb(send_buff);
+ send_buff = NULL;
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ mutex_lock(&nci_mutex);
+ if (state != virtual_ncidev_enabled) {
+ mutex_unlock(&nci_mutex);
+ return 0;
+ }
+
+ if (send_buff) {
+ mutex_unlock(&nci_mutex);
+ return -1;
+ }
+ send_buff = skb_copy(skb, GFP_KERNEL);
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static struct nci_ops virtual_nci_ops = {
+ .open = virtual_nci_open,
+ .close = virtual_nci_close,
+ .send = virtual_nci_send
+};
+
+static ssize_t virtual_ncidev_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t actual_len;
+
+ mutex_lock(&nci_mutex);
+ if (!send_buff) {
+ mutex_unlock(&nci_mutex);
+ return 0;
+ }
+
+ actual_len = min_t(size_t, count, send_buff->len);
+
+ if (copy_to_user(buf, send_buff->data, actual_len)) {
+ mutex_unlock(&nci_mutex);
+ return -EFAULT;
+ }
+
+ skb_pull(send_buff, actual_len);
+ if (send_buff->len == 0) {
+ consume_skb(send_buff);
+ send_buff = NULL;
+ }
+ mutex_unlock(&nci_mutex);
+
+ return actual_len;
+}
+
+static ssize_t virtual_ncidev_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(count, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ if (copy_from_user(skb_put(skb, count), buf, count)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ nci_recv_frame(ndev, skb);
+ return count;
+}
+
+static int virtual_ncidev_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ mutex_lock(&nci_mutex);
+ if (state != virtual_ncidev_disabled) {
+ mutex_unlock(&nci_mutex);
+ return -EBUSY;
+ }
+
+ ndev = nci_allocate_device(&virtual_nci_ops, VIRTUAL_NFC_PROTOCOLS,
+ 0, 0);
+ if (!ndev) {
+ mutex_unlock(&nci_mutex);
+ return -ENOMEM;
+ }
+
+ ret = nci_register_device(ndev);
+ if (ret < 0) {
+ nci_free_device(ndev);
+ mutex_unlock(&nci_mutex);
+ return ret;
+ }
+ state = virtual_ncidev_enabled;
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static int virtual_ncidev_close(struct inode *inode, struct file *file)
+{
+ mutex_lock(&nci_mutex);
+
+ if (state == virtual_ncidev_enabled) {
+ state = virtual_ncidev_disabling;
+ mutex_unlock(&nci_mutex);
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+
+ mutex_lock(&nci_mutex);
+ }
+
+ state = virtual_ncidev_disabled;
+ mutex_unlock(&nci_mutex);
+
+ return 0;
+}
+
+static long virtual_ncidev_ioctl(struct file *flip, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nfc_dev *nfc_dev = ndev->nfc_dev;
+ void __user *p = (void __user *)arg;
+
+ if (cmd != IOCTL_GET_NCIDEV_IDX)
+ return -ENOTTY;
+
+ if (copy_to_user(p, &nfc_dev->idx, sizeof(nfc_dev->idx)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static const struct file_operations virtual_ncidev_fops = {
+ .owner = THIS_MODULE,
+ .read = virtual_ncidev_read,
+ .write = virtual_ncidev_write,
+ .open = virtual_ncidev_open,
+ .release = virtual_ncidev_close,
+ .unlocked_ioctl = virtual_ncidev_ioctl
+};
+
+static int __init virtual_ncidev_init(void)
+{
+ state = virtual_ncidev_disabled;
+ miscdev.minor = MISC_DYNAMIC_MINOR;
+ miscdev.name = "virtual_nci";
+ miscdev.fops = &virtual_ncidev_fops;
+ miscdev.mode = S_IALLUGO;
+
+ return misc_register(&miscdev);
+}
+
+static void __exit virtual_ncidev_exit(void)
+{
+ misc_deregister(&miscdev);
+}
+
+module_init(virtual_ncidev_init);
+module_exit(virtual_ncidev_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Virtual NCI device simulation driver");
+MODULE_AUTHOR("Bongsu Jeon <bongsu.jeon@samsung.com>");
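A hypothetical user-space exerciser for the simulator above; the device node name and ioctl number come from the driver, everything else is illustrative and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define IOCTL_GET_NCIDEV_IDX 0	/* matches the driver definition */

int main(void)
{
	unsigned int idx;
	unsigned char buf[256];
	ssize_t n;
	int fd = open("/dev/virtual_nci", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_GET_NCIDEV_IDX, &idx) == 0)
		printf("virtual NCI device index: %u\n", idx);

	/* frames written here are injected into the NCI core as if they
	 * came from a controller; frames the core sends show up on read() */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		write(fd, buf, n);
	close(fd);
	return 0;
}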
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ce1b61519441..8caf9b34734d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- nvme_trace_bio_complete(req, status);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
nvme_init_request(req, cmd);
return req;
}
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
@@ -1545,8 +1543,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
}
length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = nvme_to_user_ptr(io.metadata);
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
@@ -2858,6 +2869,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -2877,7 +2893,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
}
if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
- (ctrl->opts && ctrl->opts->discovery_nqn))
+ nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@@ -3146,7 +3162,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
- if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@@ -3186,7 +3202,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
- if (!ctrl->identified) {
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
ret = nvme_hwmon_init(ctrl);
if (ret < 0)
return ret;
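The nvme_submit_io() change rejects user-supplied metadata when PRACT is set and the namespace metadata region is exactly the 8-byte T10 PI tuple, since the controller then inserts and strips the protection information itself; otherwise the old sizing applies. A worked example with illustrative values:

/* io.nblocks is zero-based: a request for 8 logical blocks carries 7 */
unsigned int nblocks = 7;
unsigned int ms = 8;				/* metadata bytes per LBA */
unsigned int meta_len = (nblocks + 1) * ms;	/* 64 bytes of metadata */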
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 38373a0e86ef..5f36cfa8136c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
struct kref ref;
@@ -1889,6 +1890,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -2046,7 +2056,7 @@ done:
check_error:
if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e49f61f81df..88a6b97247f5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b4385cb0ff60..856aa31931c1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -23,6 +23,7 @@
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
return true;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
int i;
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
- rq_dma_dir(req));
- return;
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
}
- WARN_ON_ONCE(!iod->nents);
+}
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+ const int last_sg = SGES_PER_PAGE - 1;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+ for (i = 0; i < iod->npages; i++) {
+ struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
- if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
- dma_addr);
+ dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
- for (i = 0; i < iod->npages; i++) {
- void *addr = nvme_pci_iod_list(req)[i];
+}
- if (iod->use_sgl) {
- struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- next_dma_addr =
- le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
- } else {
- __le64 *prp_list = addr;
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+ rq_dma_dir(req));
+ else
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
- next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
- dma_addr = next_dma_addr;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
}
+ WARN_ON_ONCE(!iod->nents);
+
+ nvme_unmap_sg(dev, req);
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ iod->first_dma);
+ else if (iod->use_sgl)
+ nvme_free_sgls(dev, req);
+ else
+ nvme_free_prps(dev, req);
mempool_free(iod->sg, dev->iod_mempool);
}
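Editor's note: the refactor above splits the old monolithic unmap into nvme_free_prps()/nvme_free_sgls(), each walking a chain of descriptor pages in which the last slot of every page holds the DMA address of the next page. A userspace analogue of that walk, as a sketch only: plain pointers stand in for DMA addresses, free() for dma_pool_free(), and the page count is made up.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOTS_PER_PAGE 512		/* 4 KiB page / 8-byte entries */
#define LAST_SLOT (SLOTS_PER_PAGE - 1)

int main(void)
{
	uint64_t *pages[3];
	int i;

	/* Build a three-page chain; the last slot links to the next page. */
	for (i = 0; i < 3; i++)
		pages[i] = calloc(SLOTS_PER_PAGE, sizeof(uint64_t));
	pages[0][LAST_SLOT] = (uint64_t)(uintptr_t)pages[1];
	pages[1][LAST_SLOT] = (uint64_t)(uintptr_t)pages[2];

	/* Teardown: follow the link slot, like nvme_free_prps() does. */
	uint64_t *page = pages[0];
	for (i = 0; i < 3; i++) {
		uint64_t *next = (uint64_t *)(uintptr_t)page[LAST_SLOT];

		printf("freeing descriptor page %d at %p\n", i, (void *)page);
		free(page);
		page = next;
	}
	return 0;
}
```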
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return BLK_STS_RESOURCE;
+ goto free_prps;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
-
done:
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+ nvme_free_prps(dev, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
"Invalid SGL for payload:%d nents:%d\n",
blk_rq_payload_bytes(req), iod->nents);
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list)
- return BLK_STS_RESOURCE;
+ goto free_sgls;
i = 0;
nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
} while (--entries > 0);
return BLK_STS_OK;
+free_sgls:
+ nvme_free_sgls(dev, req);
+ return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
- goto out;
+ goto out_free_sg;
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
- goto out;
+ goto out_free_sg;
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
if (ret != BLK_STS_OK)
- nvme_unmap_data(dev, req);
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ nvme_unmap_sg(dev, req);
+out_free_sg:
+ mempool_free(iod->sg, dev->iod_mempool);
return ret;
}
@@ -967,6 +997,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@@ -975,17 +1006,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
+ command_id, le16_to_cpu(cqe->sq_id));
return;
}
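Editor's note: the hunk above snapshots cqe->command_id with READ_ONCE() so the AEN check, the tag lookup, and the warning all operate on one consistent value even though the CQE sits in memory the device writes via DMA. A compilable sketch of the pattern, with a volatile access standing in for READ_ONCE() and a made-up validity bound:

```c
#include <stdint.h>
#include <stdio.h>

struct cqe { uint16_t command_id; };

static struct cqe shared_cqe = { .command_id = 42 };

int main(void)
{
	/* One load; a concurrent writer can no longer change what we
	 * validate versus what we later use. */
	uint16_t command_id = *(volatile uint16_t *)&shared_cqe.command_id;

	if (command_id >= 1024) {	/* hypothetical tag-space bound */
		fprintf(stderr, "invalid id %u\n", command_id);
		return 1;
	}
	printf("completing request with tag %u\n", command_id);
	return 0;
}
```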
@@ -1794,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (dev->cmb_size)
return;
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
@@ -1808,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Tell the controller about the host side address mapping the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
+ /*
* Controllers may support a CMB size larger than their BAR,
* for example, due to being behind a bridge. Reduce the CMB to
* the reported size of the BAR
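Editor's note: the new io-64-nonatomic-hi-lo.h include supplies hi_lo_writeq(), used above to program CMBMSC; on platforms without a native 64-bit MMIO store it issues two 32-bit stores, upper half first. A toy model of that split, with an ordinary two-word array standing in for the register:

```c
#include <stdint.h>
#include <stdio.h>

static void hi_lo_writeq(uint64_t val, uint32_t reg[2])
{
	reg[1] = (uint32_t)(val >> 32);	/* high word first */
	reg[0] = (uint32_t)val;		/* then low word */
}

int main(void)
{
	uint32_t cmbmsc[2] = { 0, 0 };
	/* Made-up value playing the role of flags | bus address. */
	uint64_t v = (0x1234ULL << 32) | 0x5678;

	hi_lo_writeq(v, cmbmsc);
	printf("lo=0x%x hi=0x%x\n", cmbmsc[0], cmbmsc[1]);
	return 0;
}
```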
@@ -3196,7 +3240,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LightNVM qemu device */

.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cf6c49d09c82..b7ce4f221d99 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
struct completion cm_done;
bool pi_support;
int cq_size;
+ struct mutex queue_lock;
};
struct nvme_rdma_ctrl {
@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
int ret;
queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
if (idx && ctrl->ctrl.max_integrity_segments)
queue->pi_support = true;
@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
if (IS_ERR(queue->cm_id)) {
dev_info(ctrl->ctrl.device,
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
- return PTR_ERR(queue->cm_id);
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
}
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
- return;
- __nvme_rdma_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ mutex_destroy(&queue->queue_lock);
}
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
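Editor's note: the queue_lock added above turns stop into a serialized, run-once operation; the mutex orders concurrent callers and the test-and-clear of the LIVE bit picks a single winner to do the teardown. A userspace sketch (build with `cc -pthread`); the bit is modeled as a plain bool since the mutex already provides exclusion here:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t queue_lock;
	bool live;
};

static void __stop_queue(struct queue *q)
{
	printf("tearing down queue %p (runs exactly once)\n", (void *)q);
}

static void stop_queue(struct queue *q)
{
	pthread_mutex_lock(&q->queue_lock);
	if (q->live) {			/* test_and_clear_bit(LIVE) */
		q->live = false;
		__stop_queue(q);
	}
	pthread_mutex_unlock(&q->queue_lock);
}

static void *racer(void *arg)
{
	stop_queue(arg);
	return NULL;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, true };
	pthread_t a, b;

	pthread_create(&a, NULL, racer, &q);
	pthread_create(&b, NULL, racer, &q);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```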
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1ba659927442..881d28eb15e9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
struct work_struct io_work;
int io_cpu;
+ struct mutex queue_lock;
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
@@ -201,7 +202,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
- return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+ return min_t(size_t, iov_iter_single_seg_count(&req->iter),
req->pdu_len - req->pdu_sent);
}
@@ -262,6 +263,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -276,10 +287,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
* directly, otherwise queue io_work. Also, only do that if we
* are on the same cpu, so we don't introduce contention.
*/
- if (queue->io_cpu == smp_processor_id() &&
+ if (queue->io_cpu == __smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
queue->more_requests = !last;
- nvme_tcp_try_send(queue);
+ nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
} else if (last) {
@@ -1209,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
sock_release(queue->sock);
kfree(queue->pdu);
+ mutex_destroy(&queue->queue_lock);
}
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1370,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int ret, rcv_pdu_size;
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
@@ -1388,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (ret) {
dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
- return ret;
+ goto err_destroy_mutex;
}
/* Single syn retry */
@@ -1497,6 +1510,8 @@ err_crypto:
err_sock:
sock_release(queue->sock);
queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -1524,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
- return;
- __nvme_tcp_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
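Editor's note: nvme_tcp_send_all(), introduced above, is a simple drain loop; it keeps invoking the single-shot sender while it reports progress, so a direct send from the submitting context empties the queue rather than pushing one request. A toy version with a counter standing in for the request list:

```c
#include <stdio.h>

static int pending = 3;			/* pretend three requests are queued */

static int try_send(void)
{
	if (pending == 0)
		return 0;		/* nothing left: stop draining */
	printf("sent one request, %d left\n", --pending);
	return 1;			/* made progress: caller retries */
}

static void send_all(void)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = try_send();
	} while (ret > 0);
}

int main(void)
{
	send_all();
	return 0;
}
```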
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 8d90235e4fcc..dc1ea468b182 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
/* return an all zeroed buffer if we can't find an active namespace */
ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
- if (!ns)
+ if (!ns) {
+ status = NVME_SC_INVALID_NS;
goto done;
+ }
nvmet_ns_revalidate(ns);
@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id->nsattr |= (1 << 0);
nvmet_put_namespace(ns);
done:
- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
kfree(id);
out:
nvmet_req_complete(req, status);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 733d9363900e..68213f0a052b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1501,7 +1501,8 @@ static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int opcode, starting, amount;
+ unsigned int opcode;
+ int starting, amount;
if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
return -EBADRQC;
@@ -1588,8 +1589,8 @@ out_destroy_class:
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport;
- struct fcloop_nport *nport;
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
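Editor's note: the fcloop change above fixes a classic sscanf() mismatch; the %x conversion stores through an unsigned int pointer, so passing a plain int is undefined behavior per the C standard. A small standalone demonstration of the corrected pairing:

```c
#include <stdio.h>

int main(void)
{
	unsigned int opcode;	/* %x requires unsigned int, not int */
	int starting, amount;
	const char *buf = "2a:5:3";

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return 1;
	printf("opcode=0x%x starting=%d amount=%d\n",
	       opcode, starting, amount);
	return 0;
}
```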
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5c1e7cb7fe0d..06b6b742bb21 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1220,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
}
ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
+
+ if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)) {
+ pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+ cm_id->device->name);
+ nport->pi_enable = false;
+ }
+
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -1641,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
@@ -1845,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
- if (port->nport->pi_enable &&
- !(cm_id->device->attrs.device_cap_flags &
- IB_DEVICE_INTEGRITY_HANDOVER)) {
- pr_err("T10-PI is not supported for %pISpcs\n", addr);
- ret = -EINVAL;
- goto out_destroy_id;
- }
-
port->cm_id = cm_id;
return 0;
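Editor's note: the CONNECTING case above now drains rsp_wait_list before falling through to the disconnect logic, releasing every response still parked on the queue. A userspace analogue of that take-first/unlink/put loop, using a hand-rolled singly linked list in place of list_head:

```c
#include <stdio.h>
#include <stdlib.h>

struct rsp {
	struct rsp *next;
	int id;
};

int main(void)
{
	struct rsp *head = NULL;
	int i;

	/* Queue up a few waiting responses. */
	for (i = 0; i < 3; i++) {
		struct rsp *r = malloc(sizeof(*r));

		r->id = i;
		r->next = head;
		head = r;
	}

	/* Drain: take the first entry, unlink it, then put it. */
	while (head) {
		struct rsp *r = head;

		head = r->next;		/* list_del() */
		printf("releasing rsp %d\n", r->id);
		free(r);		/* nvmet_rdma_put_rsp() */
	}
	return 0;
}
```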
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 4268eb359915..8c905aabacc0 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1092,7 +1092,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
if (IS_ERR(opp_table->clk)) {
ret = PTR_ERR(opp_table->clk);
if (ret == -EPROBE_DEFER)
- goto err;
+ goto remove_opp_dev;
dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
}
@@ -1101,7 +1101,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
if (ret) {
if (ret == -EPROBE_DEFER)
- goto err;
+ goto put_clk;
dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
__func__, ret);
@@ -1113,6 +1113,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
+put_clk:
+ if (!IS_ERR(opp_table->clk))
+ clk_put(opp_table->clk);
+remove_opp_dev:
+ _remove_opp_dev(opp_dev, opp_table);
err:
kfree(opp_table);
return ERR_PTR(ret);
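Editor's note: the OPP hunks replace two bare `goto err` jumps with labels that unwind exactly what was acquired, in reverse order. A compact sketch of the idiom with stand-in resources; the failure is simulated on the third acquisition:

```c
#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail)
		return -1;
	printf("got %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int allocate_table(void)
{
	int ret;

	ret = acquire("opp_dev", 0);
	if (ret)
		goto err;

	ret = acquire("clk", 0);
	if (ret)
		goto remove_opp_dev;	/* only the earlier step to undo */

	ret = acquire("icc paths", 1);	/* simulate -EPROBE_DEFER */
	if (ret)
		goto put_clk;		/* undo clk, then opp_dev */

	return 0;

put_clk:
	release("clk");
remove_opp_dev:
	release("opp_dev");
err:
	return ret;
}

int main(void)
{
	return allocate_table() ? 1 : 0;
}
```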
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 794a37d50853..cb2f55f450e4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -726,11 +726,6 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
return per_cpu(hw_events->irq, cpu);
}
-bool arm_pmu_irq_is_nmi(void)
-{
- return has_nmi;
-}
-
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
diff --git a/drivers/phy/ingenic/Makefile b/drivers/phy/ingenic/Makefile
index 65d5ea00fc9d..1cb158d7233f 100644
--- a/drivers/phy/ingenic/Makefile
+++ b/drivers/phy/ingenic/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB) += phy-ingenic-usb.o
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index d38def43b1bf..55f8e6c048ab 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -49,7 +49,9 @@ config PHY_MTK_HDMI
config PHY_MTK_MIPI_DSI
tristate "MediaTek MIPI-DSI Driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on OF
select GENERIC_PHY
help
Support MIPI DSI for Mediatek SoCs.
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 442522ba487f..4728e2bff662 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
- return PTR_ERR(generic_phy);
+ goto out_reg_disable;
}
phy_set_drvdata(generic_phy, ddata);
phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto out_reg_disable;
+ }
error = cpcap_usb_init_optional_pins(ddata);
if (error)
- return error;
+ goto out_reg_disable;
cpcap_usb_init_optional_gpios(ddata);
error = cpcap_usb_init_iio(ddata);
if (error)
- return error;
+ goto out_reg_disable;
error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
- return error;
+ goto out_reg_disable;
usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return 0;
+
+out_reg_disable:
+ regulator_disable(ddata->vusb);
+
+ return error;
}
static int cpcap_usb_phy_remove(struct platform_device *pdev)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 34803a6c7664..5c1a109842a7 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
#define D22 40
SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
GROUP_DECL(PWM8G0, D22);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 7aeb552d16ce..72f17f26acd8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -920,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
err = hw->soc->bias_set(hw, desc, pullup);
if (err)
return err;
+ } else if (hw->soc->bias_set_combo) {
+ err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+ if (err)
+ return err;
} else {
return -ENOTSUPP;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index d4ea10803fd9..abfe11c7b49f 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -949,7 +949,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
} else {
int irq = chip->to_irq(chip, offset);
const int pullidx = pull ? 1 : 0;
- bool wake;
int val;
static const char * const pulls[] = {
"none ",
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 53a6a24bd052..3ea163498647 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -37,11 +37,11 @@
#define JZ4740_GPIO_TRIG 0x70
#define JZ4740_GPIO_FLAG 0x80
-#define JZ4760_GPIO_INT 0x10
-#define JZ4760_GPIO_PAT1 0x30
-#define JZ4760_GPIO_PAT0 0x40
-#define JZ4760_GPIO_FLAG 0x50
-#define JZ4760_GPIO_PEN 0x70
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
#define X1830_GPIO_PEL 0x110
#define X1830_GPIO_PEH 0x120
@@ -1688,8 +1688,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
}
@@ -1718,9 +1718,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
break;
}
- if (jzgc->jzpc->info->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
+ if (jzgc->jzpc->info->version >= ID_JZ4770) {
+ reg1 = JZ4770_GPIO_PAT1;
+ reg2 = JZ4770_GPIO_PAT0;
} else {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
@@ -1758,8 +1758,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -1774,8 +1774,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
}
@@ -1799,8 +1799,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
}
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
}
@@ -1856,8 +1856,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -1938,9 +1938,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4760) {
- if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
- ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ if (jzpc->info->version >= ID_JZ4770) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+ ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
@@ -1991,20 +1991,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
'A' + offt, idx, func);
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
}
return 0;
@@ -2057,14 +2057,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
'A' + offt, idx, input ? "in" : "out");
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
@@ -2091,8 +2091,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->info->version >= ID_JZ4760)
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -2141,8 +2141,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
REG_SET(X1830_GPIO_PEH), bias << idxh);
}
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
}
@@ -2151,8 +2151,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->info->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ if (jzpc->info->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e051aecf95c4..d70caecd21d2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -51,6 +51,7 @@
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
* @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
* @soc: Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
* @phys_base: Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
MSM_ACCESSOR(intr_status)
MSM_ACCESSOR(intr_target)
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+ const struct msm_pingroup *g)
+{
+ u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+ msm_writel_intr_status(val, pctrl, g);
+}
+
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned group)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *gc = &pctrl->chip;
+ unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+ struct irq_data *d = irq_get_irq_data(irq);
+ unsigned int gpio_func = pctrl->soc->gpio_func;
const struct msm_pingroup *g;
unsigned long flags;
u32 val, mask;
@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ /*
+ * If a GPIO interrupt is set up on this pin then we need special
+ * handling. Specifically, the interrupt detection logic will still see
+ * the pin twiddle even when we're muxed away.
+ *
+ * When we see a pin with an interrupt set up on it then we'll disable
+ * (mask) interrupts on it when we mux away until we mux back. Note
+ * that disable_irq() refcounts and interrupts are disabled as long as
+ * at least one disable_irq() has been called.
+ */
+ if (d && i != gpio_func &&
+ !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+ disable_irq(irq);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_ctl(pctrl, g);
@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (d && i == gpio_func &&
+ test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+ /*
+ * Clear interrupts detected while not GPIO since we only
+ * masked things.
+ */
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+ else
+ msm_ack_intr_status(pctrl, g);
+
+ enable_irq(irq);
+ }
+
return 0;
}
@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
if (!g->nfuncs)
return 0;
- /* For now assume function 0 is GPIO because it always is */
- return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+ return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
}
static const struct pinmux_ops msm_pinmux_ops = {
@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- if (status_clear) {
- /*
- * clear the interrupt status bit before unmask to avoid
- * any erroneous interrupts that would have got latched
- * when the interrupt is not in use.
- */
- val = msm_readl_intr_status(pctrl, g);
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
- }
-
val = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_enable_bit);
@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
irq_chip_enable_parent(d);
if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
- msm_gpio_irq_clear_unmask(d, true);
+ msm_gpio_irq_unmask(d);
}
static void msm_gpio_irq_disable(struct irq_data *d)
@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
msm_gpio_irq_mask(d);
}
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
- msm_gpio_irq_clear_unmask(d, false);
-}
-
/**
* msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
* @d: The irq data.
@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = msm_readl_intr_status(pctrl, g);
- if (g->intr_ack_high)
- val |= BIT(g->intr_status_bit);
- else
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
+ msm_ack_intr_status(pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
+ bool was_enabled;
u32 val;
if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
val = msm_readl_intr_cfg(pctrl, g);
+ was_enabled = val & BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
}
msm_writel_intr_cfg(val, pctrl, g);
+ /*
+ * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+ * Clear the interrupt. This is safe because we have
+ * IRQCHIP_SET_TYPE_MASKED.
+ */
+ if (!was_enabled)
+ msm_ack_intr_status(pctrl, g);
+
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
}
/*
- * Clear the interrupt that may be pending before we enable
- * the line.
- * This is especially a problem with the GPIOs routed to the
- * PDC. These GPIOs are direct-connect interrupts to the GIC.
- * Disabling the interrupt line at the PDC does not prevent
- * the interrupt from being latched at the GIC. The state at
- * GIC needs to be cleared before enabling.
+ * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+ * only works if disable is not lazy since we only clear any bogus
+ * interrupt in hardware. Explicitly mark the interrupt as UNLAZY.
*/
- if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
return 0;
out:
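Editor's note: taken together, the pinctrl-msm hunks implement this rule: the first mux away from GPIO disables the IRQ exactly once (guarded by a per-pin test-and-set), and muxing back acks whatever latched while away before re-enabling. A stub-based sketch of that bookkeeping; disable_irq()/enable_irq() refcount in the kernel, here they only print:

```c
#include <stdbool.h>
#include <stdio.h>

static bool disabled_for_mux;		/* one pin's bit of the bitmap */

static void disable_irq(void)     { printf("irq disabled\n"); }
static void enable_irq(void)      { printf("irq enabled\n"); }
static void ack_intr_status(void) { printf("latched status cleared\n"); }

static void set_mux(bool to_gpio)
{
	if (!to_gpio && !disabled_for_mux) {	/* test_and_set_bit() */
		disabled_for_mux = true;
		disable_irq();
	}

	printf("mux set to %s\n", to_gpio ? "gpio" : "peripheral");

	if (to_gpio && disabled_for_mux) {	/* test_and_clear_bit() */
		disabled_for_mux = false;
		ack_intr_status();	/* drop anything latched while away */
		enable_irq();
	}
}

int main(void)
{
	set_mux(false);		/* away from GPIO: mask the interrupt */
	set_mux(false);		/* already masked: no double disable */
	set_mux(true);		/* back to GPIO: ack stale state, unmask */
	return 0;
}
```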
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 333f99243c43..e31a5167c91e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
* @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
* to be aware that their parent can't handle dual
* edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
bool wakeirq_dual_edge_errata;
+ unsigned int gpio_func;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 33040b0b3b79..2c941cdac9ee 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,6 +5,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ACPI
default y
help
Say Y here to get to see options for platform-specific device drivers
@@ -29,20 +30,19 @@ config SURFACE3_WMI
config SURFACE_3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
- depends on ACPI && KEYBOARD_GPIO && I2C
+ depends on KEYBOARD_GPIO && I2C
help
This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
config SURFACE_3_POWER_OPREGION
tristate "Surface 3 battery platform operation region support"
- depends on ACPI && I2C
+ depends on I2C
help
This driver provides support for ACPI operation
region of the Surface 3 battery platform driver.
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
- depends on ACPI
depends on DMI
help
This driver marks the GPEs related to the ACPI lid device found on
@@ -52,7 +52,7 @@ config SURFACE_GPE
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
- depends on ACPI && INPUT
+ depends on INPUT
help
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index e49e5d6d5d4e..86f6991b1215 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable)
return 0;
}
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
{
return surface_lid_enable_wakeup(dev, true);
}
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
{
return surface_lid_enable_wakeup(dev, false);
}
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 0102bf1c7916..ef8342572463 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
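Editor's note: the one-character amd-pmc fix above matters because boolean Kconfig symbols are defined to 1 when enabled and left undefined otherwise; `#if CONFIG_DEBUG_FS` leans on the preprocessor evaluating an undefined identifier as 0 (and trips -Wundef), whereas #ifdef tests for definition directly. A standalone illustration, defining the macro locally just for the demo:

```c
#include <stdio.h>

#define CONFIG_DEBUG_FS 1	/* comment out to simulate DEBUG_FS=n */

int main(void)
{
#ifdef CONFIG_DEBUG_FS	/* correct: tests whether the symbol is defined */
	printf("debugfs code compiled in\n");
#else
	printf("debugfs code compiled out\n");
#endif
	return 0;
}
```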
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index ecd477964d11..18bf8aeb5f87 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -247,7 +247,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
ret = bios_return->return_code;
if (ret) {
- if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+ if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+ ret != HPWMI_RET_UNKNOWN_CMDTYPE)
pr_warn("query 0x%x returned error 0x%x\n", query, ret);
goto out_free;
}
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index b457b0babde3..2cce82579d09 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[] = {
{}
};
-static const struct i2c_inst_data int3515_data[] = {
- { "tps6598x", IRQ_RESOURCE_APIC, 0 },
- { "tps6598x", IRQ_RESOURCE_APIC, 1 },
- { "tps6598x", IRQ_RESOURCE_APIC, 2 },
- { "tps6598x", IRQ_RESOURCE_APIC, 3 },
- {}
-};
+/*
+ * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt
+ * issues. The most common problem seen is interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where those are not a problem, so
+ * there are some other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the code below, and add an entry for the
+ * INT3515 device to the i2c_multi_instance_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[] = {
+ * { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ * { }
+ * };
+ */
/*
* Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[] = {
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
{ "BSG2150", (unsigned long)bsg2150_data },
- { "INT3515", (unsigned long)int3515_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 7598cd46cf60..5b81bafa5c16 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -92,6 +92,7 @@ struct ideapad_private {
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
+ bool has_touchpad_switch;
const char *fnesc_guid;
};
@@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
} else if (attr == &dev_attr_fn_lock.attr) {
supported = acpi_has_method(priv->adev->handle, "HALS") &&
acpi_has_method(priv->adev->handle, "SALS");
- } else
+ } else if (attr == &dev_attr_touchpad.attr)
+ supported = priv->has_touchpad_switch;
+ else
supported = true;
return supported ? attr->mode : 0;
@@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
{
unsigned long value;
+ if (!priv->has_touchpad_switch)
+ return;
+
/* Without reading from the EC, the touchpad LED doesn't switch state */
if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
@@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev)
priv->platform_device = pdev;
priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+ /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+ priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
+
ret = ideapad_sysfs_init(priv);
if (ret)
return ret;
@@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (!priv->has_hw_rfkill_switch)
write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+ /* The same for Touchpad */
+ if (!priv->has_touchpad_switch)
+ write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(priv, i);
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 3b49a1f4061b..30a9062d2b4b 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -207,19 +207,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
},
},
{} /* Array terminator */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e03df2881dc6..f3e8eca8d86d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8783,6 +8783,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
+ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3rd gen) */
@@ -9951,9 +9952,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
return 0;
/* Otherwise, if there was an error return it */
- if (palm_err && (palm_err != ENODEV))
+ if (palm_err && (palm_err != -ENODEV))
return palm_err;
- if (lap_err && (lap_err != ENODEV))
+ if (lap_err && (lap_err != -ENODEV))
return lap_err;
if (has_palmsensor) {
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 5783139d0a11..c4de932302d6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
.properties = digma_citi_e200_props,
};
+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -943,6 +953,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
+ {
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
.matches = {
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 476d7c7fe70a..f2edef0df40f 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -64,6 +64,7 @@ config DP83640_PHY
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
depends on PTP_1588_CLOCK
+ select CRC32
help
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -78,6 +79,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_INES
tristate "ZHAW InES PTP time stamping IP core"
depends on NETWORK_PHY_TIMESTAMPING
+ depends on HAS_IOMEM
depends on PHYLIB
depends on PTP_1588_CLOCK
help
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 53fa84f4d1e1..5abdd29fb9f3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+ depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e6d5d98c3cea..9309765d0450 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+/* Typical regulator startup times as per data sheet, in microseconds */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME 440
+#define BD71847_LDO2_STARTUP_TIME 370
+#define BD71847_LDO3_STARTUP_TIME 310
+#define BD71847_LDO4_STARTUP_TIME 400
+#define BD71847_LDO5_STARTUP_TIME 530
+#define BD71847_LDO6_STARTUP_TIME 400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME 440
+#define BD71837_LDO2_STARTUP_TIME 370
+#define BD71837_LDO3_STARTUP_TIME 310
+#define BD71837_LDO4_STARTUP_TIME 400
+#define BD71837_LDO5_STARTUP_TIME 310
+#define BD71837_LDO6_STARTUP_TIME 400
+#define BD71837_LDO7_STARTUP_TIME 530
+
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ca03d8e70bd1..67a768fe5b2a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1813,13 +1813,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
- int ret;
+ int ret = 0;
/* No supply to resolve? */
if (!rdev->supply_name)
return 0;
- /* Supply already resolved? */
+ /* Supply already resolved? (fast-path without locking contention) */
if (rdev->supply)
return 0;
@@ -1829,7 +1829,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Did the lookup explicitly defer for us? */
if (ret == -EPROBE_DEFER)
- return ret;
+ goto out;
if (have_full_constraints()) {
r = dummy_regulator_rdev;
@@ -1837,15 +1837,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
if (r == rdev) {
dev_err(dev, "Supply for %s (%s) resolved to itself\n",
rdev->desc->name, rdev->supply_name);
- if (!have_full_constraints())
- return -EINVAL;
+ if (!have_full_constraints()) {
+ ret = -EINVAL;
+ goto out;
+ }
r = dummy_regulator_rdev;
get_device(&r->dev);
}
@@ -1859,7 +1862,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
if (!device_is_bound(r->dev.parent)) {
put_device(&r->dev);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
@@ -1867,15 +1871,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
ret = regulator_resolve_supply(r);
if (ret < 0) {
put_device(&r->dev);
- return ret;
+ goto out;
+ }
+
+ /*
+ * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+ * between rdev->supply null check and setting rdev->supply in
+ * set_supply() from concurrent tasks.
+ */
+ regulator_lock(rdev);
+
+ /* Supply just resolved by a concurrent task? */
+ if (rdev->supply) {
+ regulator_unlock(rdev);
+ put_device(&r->dev);
+ goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
+ regulator_unlock(rdev);
put_device(&r->dev);
- return ret;
+ goto out;
}
+ regulator_unlock(rdev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -1886,11 +1907,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (ret < 0) {
_regulator_put(rdev->supply);
rdev->supply = NULL;
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ return ret;
}
/* Internal regulator request function */
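Aside: the hunk above funnels regulator_resolve_supply() through a single exit label and, more importantly, rechecks rdev->supply with rdev's lock held, so two tasks racing through the unlocked fast path cannot both call set_supply(). A minimal userspace sketch of that recheck-under-lock pattern follows; every name in it is a stand-in, not kernel API.

#include <pthread.h>
#include <stdio.h>

struct rdev {
	pthread_mutex_t lock;
	const char *supply;		/* NULL until resolved */
};

static int resolve_supply(struct rdev *rdev, const char *candidate)
{
	if (rdev->supply)		/* unlocked fast path */
		return 0;

	pthread_mutex_lock(&rdev->lock);
	if (rdev->supply) {		/* lost the race: a concurrent task set it */
		pthread_mutex_unlock(&rdev->lock);
		return 0;
	}
	rdev->supply = candidate;	/* stand-in for set_supply() */
	pthread_mutex_unlock(&rdev->lock);
	return 0;
}

int main(void)
{
	struct rdev rdev = { PTHREAD_MUTEX_INITIALIZER, NULL };

	resolve_supply(&rdev, "vbus");
	resolve_supply(&rdev, "ignored");	/* second caller sees it set */
	printf("supply = %s\n", rdev.supply);	/* "vbus" */
	return 0;
}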
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 308c27fa6ea8..af9918cd27aa 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
}
static const struct of_device_id pf8x00_dt_ids[] = {
- { .compatible = "nxp,pf8x00",},
+ { .compatible = "nxp,pf8100",},
+ { .compatible = "nxp,pf8121a",},
+ { .compatible = "nxp,pf8200",},
{ }
};
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
- { "pf8x00", 0 },
+ { "pf8100", 0 },
+ { "pf8121a", 0 },
+ { "pf8200", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index fe030ec4b7db..c395a8dda6f7 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
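Aside: the one-liner above corrects the PMIC5 HFSMPS515 step size from 1600 uV to 16000 uV, so the five selectors span 2.800000 V to 2.864000 V rather than topping out at 2.806400 V. A small sketch of the selector-to-voltage arithmetic, assuming the usual linear-range semantics (the helper below is illustrative, not the kernel implementation):

#include <stdio.h>

/* mirrors REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000) */
static unsigned int linear_range_uV(unsigned int min_uV, unsigned int step_uV,
				    unsigned int sel)
{
	return min_uV + sel * step_uV;
}

int main(void)
{
	for (unsigned int sel = 0; sel <= 4; sel++)
		printf("sel %u -> %u uV\n",
		       sel, linear_range_uV(2800000, 16000, sel));
	return 0;	/* prints 2800000, 2816000, ..., 2864000 */
}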
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6f5ddc3eab8c..a1da83b0b0ef 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -956,24 +956,6 @@ static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
return PFN_UP(end) - PFN_DOWN(start);
}
-static inline int qeth_get_ip_version(struct sk_buff *skb)
-{
- struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
- __be16 prot = veth->h_vlan_proto;
-
- if (prot == htons(ETH_P_8021Q))
- prot = veth->h_vlan_encapsulated_proto;
-
- switch (prot) {
- case htons(ETH_P_IPV6):
- return 6;
- case htons(ETH_P_IP):
- return 4;
- default:
- return 0;
- }
-}
-
static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
{
u8 *addr = eth_hdr(skb)->h_dest;
@@ -984,14 +966,20 @@ static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
return RTN_UNICAST;
}
-static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
+static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
+ __be16 proto)
{
struct dst_entry *dst = skb_dst(skb);
struct rt6_info *rt;
rt = (struct rt6_info *) dst;
- if (dst)
- dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
+ if (dst) {
+ if (proto == htons(ETH_P_IPV6))
+ dst = dst_check(dst, rt6_get_cookie(rt));
+ else
+ dst = dst_check(dst, 0);
+ }
+
return dst;
}
@@ -1014,11 +1002,11 @@ static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
return &ipv6_hdr(skb)->daddr;
}
-static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
+static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, __be16 proto)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
- if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
- (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
+ if ((proto == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (proto == htons(ETH_P_IPV6) && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
*flags |= QETH_HDR_EXT_UDP;
}
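Aside: the hunks above replace the home-grown "int ipv" (4/6/0) with the big-endian EtherType (__be16) that vlan_get_protocol() already returns, dropping a conversion and making AF_IUCV representable. A hedged userspace sketch of dispatching on a network-order protocol value; note the kernel can write case htons(ETH_P_IP): because its htons() folds to a constant for constant arguments, while glibc's htons() is not guaranteed to be a constant expression, so this sketch uses if/else.

#include <arpa/inet.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_IP, ETH_P_IPV6 */
#include <stdint.h>
#include <stdio.h>

static const char *proto_name(uint16_t be_proto)
{
	if (be_proto == htons(ETH_P_IP))
		return "IPv4";
	if (be_proto == htons(ETH_P_IPV6))
		return "IPv6";
	return "other";
}

int main(void)
{
	printf("%s\n", proto_name(htons(ETH_P_IPV6)));	/* IPv6 */
	return 0;
}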
@@ -1067,8 +1055,8 @@ extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
-int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
-void qeth_core_free_discipline(struct qeth_card *);
+int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
+void qeth_remove_discipline(struct qeth_card *card);
/* exports for qeth discipline device drivers */
extern struct kmem_cache *qeth_core_header_cache;
@@ -1079,7 +1067,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
@@ -1144,10 +1133,10 @@ int qeth_stop(struct net_device *dev);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv,
+ struct qeth_qdio_out_q *queue, __be16 proto,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len));
+ __be16 proto, unsigned int data_len));
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f4b60294a969..89b223885b0c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -825,7 +825,8 @@ static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
return false;
rcu_read_lock();
- next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
+ next_hop = qeth_next_hop_v4_rcu(skb,
+ qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
key = ipv4_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
@@ -851,7 +852,8 @@ static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
return false;
rcu_read_lock();
- next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
+ next_hop = qeth_next_hop_v6_rcu(skb,
+ qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
key = ipv6_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
@@ -1407,10 +1409,12 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
struct sk_buff *skb;
skb_queue_walk(&buf->skb_list, skb) {
+ struct sock *sk = skb->sk;
+
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
- if (skb->sk && skb->sk->sk_family == PF_IUCV)
- iucv_sk(skb->sk)->sk_txnotify(skb, notification);
+ if (sk && sk->sk_family == PF_IUCV)
+ iucv_sk(sk)->sk_txnotify(sk, notification);
}
}
@@ -3690,24 +3694,27 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
- /* Fake the TX completion interrupt: */
- if (IS_IQD(card)) {
- unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
- unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+ switch (rc) {
+ case 0:
+ case -ENOBUFS:
+ /* ignore temporary SIGA errors without busy condition */
- if (frames && queue->coalesced_frames >= frames) {
- napi_schedule(&queue->napi);
- queue->coalesced_frames = 0;
- QETH_TXQ_STAT_INC(queue, coal_frames);
- } else if (usecs) {
- qeth_tx_arm_timer(queue, usecs);
+ /* Fake the TX completion interrupt: */
+ if (IS_IQD(card)) {
+ unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
+ unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+
+ if (frames && queue->coalesced_frames >= frames) {
+ napi_schedule(&queue->napi);
+ queue->coalesced_frames = 0;
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (usecs) {
+ qeth_tx_arm_timer(queue, usecs);
+ }
}
- }
- if (rc) {
- /* ignore temporary SIGA errors without busy condition */
- if (rc == -ENOBUFS)
- return;
+ break;
+ default:
QETH_CARD_TEXT(queue->card, 2, "flushbuf");
QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
@@ -3717,7 +3724,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
qeth_schedule_recovery(queue->card);
- return;
}
}
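Aside: the restructured qeth_flush_buffers() above treats rc == 0 and the temporary -ENOBUFS SIGA error identically (fake the TX completion, arm the coalescing timer) and routes everything else to recovery. A hedged sketch of that rc dispatch, with the branch bodies reduced to prints:

#include <errno.h>
#include <stdio.h>

static void flush_done(int rc)
{
	switch (rc) {
	case 0:
	case -ENOBUFS:	/* temporary SIGA error without busy condition */
		puts("ok: fake TX completion / arm coalescing timer");
		break;
	default:
		printf("rc=%d: log and schedule recovery\n", rc);
	}
}

int main(void)
{
	flush_done(0);
	flush_done(-ENOBUFS);
	flush_done(-EIO);
	return 0;
}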
@@ -3896,11 +3902,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
- switch (qeth_get_ip_version(skb)) {
- case 4:
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
tos = ipv4_get_dsfield(ip_hdr(skb));
break;
- case 6:
+ case htons(ETH_P_IPV6):
tos = ipv6_get_dsfield(ipv6_hdr(skb));
break;
default:
@@ -4365,10 +4371,10 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
}
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv,
+ struct qeth_qdio_out_q *queue, __be16 proto,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len))
+ __be16 proto, unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
@@ -4401,7 +4407,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
- fill_header(queue, hdr, skb, ipv, frame_len);
+ fill_header(queue, hdr, skb, proto, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
@@ -5507,12 +5513,12 @@ out:
return rc;
}
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+ const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
@@ -5529,7 +5535,7 @@ static int qeth_set_online(struct qeth_card *card)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
- rc = card->discipline->set_online(card, carrier_ok);
+ rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
@@ -5537,7 +5543,6 @@ static int qeth_set_online(struct qeth_card *card)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
err_online:
@@ -5552,15 +5557,14 @@ err_hardsetup:
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting)
{
int rc, rc2, rc3;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
@@ -5581,7 +5585,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
cancel_work_sync(&card->rx_mode_work);
- card->discipline->set_offline(card);
+ disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
@@ -5602,16 +5606,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
+ const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
+	/* Lock-free; other users will block until we are done. */
+ disc = card->discipline;
+
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
@@ -5619,8 +5626,8 @@ static int qeth_do_reset(void *data)
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_offline(card, true);
- rc = qeth_set_online(card);
+ qeth_set_offline(card, disc, true);
+ rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -6348,9 +6355,11 @@ static int qeth_register_dbf_views(void)
static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
-int qeth_core_load_discipline(struct qeth_card *card,
- enum qeth_discipline_id discipline)
+int qeth_setup_discipline(struct qeth_card *card,
+ enum qeth_discipline_id discipline)
{
+ int rc;
+
mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
@@ -6372,12 +6381,25 @@ int qeth_core_load_discipline(struct qeth_card *card,
return -EINVAL;
}
+ rc = card->discipline->setup(card->gdev);
+ if (rc) {
+ if (discipline == QETH_DISCIPLINE_LAYER2)
+ symbol_put(qeth_l2_discipline);
+ else
+ symbol_put(qeth_l3_discipline);
+ card->discipline = NULL;
+
+ return rc;
+ }
+
card->options.layer = discipline;
return 0;
}
-void qeth_core_free_discipline(struct qeth_card *card)
+void qeth_remove_discipline(struct qeth_card *card)
{
+ card->discipline->remove(card->gdev);
+
if (IS_LAYER2(card))
symbol_put(qeth_l2_discipline);
else
@@ -6584,23 +6606,19 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
- rc = qeth_core_load_discipline(card, enforced_disc);
+ /* It's so early that we don't need the discipline_mutex yet. */
+ rc = qeth_setup_discipline(card, enforced_disc);
if (rc)
- goto err_load;
+ goto err_setup_disc;
gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
card->discipline->devtype;
- rc = card->discipline->setup(card->gdev);
- if (rc)
- goto err_disc;
break;
}
return 0;
-err_disc:
- qeth_core_free_discipline(card);
-err_load:
+err_setup_disc:
err_chp_desc:
free_netdev(card->dev);
err_card:
@@ -6616,10 +6634,10 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
- if (card->discipline) {
- card->discipline->remove(gdev);
- qeth_core_free_discipline(card);
- }
+ mutex_lock(&card->discipline_mutex);
+ if (card->discipline)
+ qeth_remove_discipline(card);
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6634,29 +6652,32 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
- rc = qeth_core_load_discipline(card, def_discipline);
+ rc = qeth_setup_discipline(card, def_discipline);
if (rc)
goto err;
- rc = card->discipline->setup(card->gdev);
- if (rc) {
- qeth_core_free_discipline(card);
- goto err;
- }
}
- rc = qeth_set_online(card);
+ rc = qeth_set_online(card, card->discipline);
+
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
+
+ mutex_lock(&card->discipline_mutex);
+ rc = qeth_set_offline(card, card->discipline, false);
+ mutex_unlock(&card->discipline_mutex);
- return qeth_set_offline(card, false);
+ return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
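Aside: the refactor above hoists card->discipline_mutex out of qeth_set_online()/qeth_set_offline() and into the ccwgroup callers, which snapshot the discipline pointer while holding the lock and pass it down. A hedged userspace sketch of that caller-held-lock shape; all names are stand-ins.

#include <pthread.h>
#include <stdio.h>

struct card;

struct discipline {
	int (*set_online)(struct card *card);
};

struct card {
	pthread_mutex_t discipline_mutex;
	const struct discipline *discipline;
};

/* The callee no longer takes the mutex; it trusts the caller's snapshot. */
static int set_online(struct card *card, const struct discipline *disc)
{
	return disc->set_online(card);
}

static int l2_set_online(struct card *card)
{
	(void)card;
	puts("layer2 online");
	return 0;
}

static const struct discipline l2 = { .set_online = l2_set_online };

int main(void)
{
	struct card card = { PTHREAD_MUTEX_INITIALIZER, &l2 };
	int rc;

	pthread_mutex_lock(&card.discipline_mutex);
	rc = set_online(&card, card.discipline);
	pthread_mutex_unlock(&card.discipline_mutex);
	return rc;
}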
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index a0f777f76f66..5815114da468 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -384,19 +384,13 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
}
- card->discipline->remove(card->gdev);
- qeth_core_free_discipline(card);
+ qeth_remove_discipline(card);
free_netdev(card->dev);
card->dev = ndev;
}
- rc = qeth_core_load_discipline(card, newdis);
- if (rc)
- goto out;
+ rc = qeth_setup_discipline(card, newdis);
- rc = card->discipline->setup(card->gdev);
- if (rc)
- qeth_core_free_discipline(card);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4ed0fb0705a5..ca44421a6d6e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -157,7 +157,7 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len)
+ __be16 proto, unsigned int data_len)
{
int cast_type = qeth_get_ether_cast_type(skb);
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -169,7 +169,7 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
} else {
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL)
- qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+ qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
}
/* set byte 3 to casting flags */
@@ -551,7 +551,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
else
- rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+ rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
qeth_l2_fill_header);
if (!rc)
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d138ac432d01..dd441eaec66e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1576,7 +1576,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
- int ipv)
+ __be16 proto)
{
struct neighbour *n = NULL;
@@ -1595,30 +1595,31 @@ static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
}
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
- switch (ipv) {
- case 4:
+ switch (proto) {
+ case htons(ETH_P_IP):
if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
- case 6:
+ case htons(ETH_P_IPV6):
return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
+ case htons(ETH_P_AF_IUCV):
+ return RTN_UNICAST;
default:
- /* ... and MAC address */
+ /* OSA only: ... and MAC address */
return qeth_get_ether_cast_type(skb);
}
}
-static int qeth_l3_get_cast_type(struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb, __be16 proto)
{
- int ipv = qeth_get_ip_version(skb);
struct dst_entry *dst;
int cast_type;
rcu_read_lock();
- dst = qeth_dst_check_rcu(skb, ipv);
- cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ dst = qeth_dst_check_rcu(skb, proto);
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
rcu_read_unlock();
return cast_type;
@@ -1637,7 +1638,7 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, unsigned int data_len)
+ __be16 proto, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -1652,23 +1653,15 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- if (skb->protocol == htons(ETH_P_AF_IUCV)) {
- l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
- memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
- iucv_trans_hdr(skb)->destUserID, 8);
- return;
- }
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+ qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, proto);
/* some HW requires combined L3+L4 csum offload: */
- if (ipv == 4)
+ if (proto == htons(ETH_P_IP))
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
}
}
- if (ipv == 4 || IS_IQD(card)) {
+ if (proto == htons(ETH_P_IP) || IS_IQD(card)) {
/* NETIF_F_HW_VLAN_CTAG_TX */
if (skb_vlan_tag_present(skb)) {
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
@@ -1680,24 +1673,33 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
}
rcu_read_lock();
- dst = qeth_dst_check_rcu(skb, ipv);
+ dst = qeth_dst_check_rcu(skb, proto);
if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
cast_type = RTN_UNICAST;
else
- cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
- if (ipv == 4) {
+ switch (proto) {
+ case htons(ETH_P_IP):
l3_hdr->next_hop.addr.s6_addr32[3] =
qeth_next_hop_v4_rcu(skb, dst);
- } else if (ipv == 6) {
+ break;
+ case htons(ETH_P_IPV6):
l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
- } else {
+ break;
+ case htons(ETH_P_AF_IUCV):
+ l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
+ memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
+ iucv_trans_hdr(skb)->destUserID, 8);
+ l3_hdr->flags |= QETH_HDR_IPV6;
+ break;
+ default:
/* OSA only: */
l3_hdr->flags |= QETH_HDR_PASSTHRU;
}
@@ -1719,7 +1721,7 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
}
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv)
+ struct qeth_qdio_out_q *queue, __be16 proto)
{
unsigned int hw_hdr_len;
int rc;
@@ -1733,15 +1735,15 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
- return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
+ return qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ __be16 proto = vlan_get_protocol(skb);
u16 txq = skb_get_queue_mapping(skb);
- int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int rc;
@@ -1752,22 +1754,32 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (card->options.sniffer)
goto tx_drop;
- if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
- (card->options.cq == QETH_CQ_ENABLED &&
- skb->protocol != htons(ETH_P_AF_IUCV)))
+
+ switch (proto) {
+ case htons(ETH_P_AF_IUCV):
+ if (card->options.cq != QETH_CQ_ENABLED)
+ goto tx_drop;
+ break;
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ if (card->options.cq == QETH_CQ_ENABLED)
+ goto tx_drop;
+ break;
+ default:
goto tx_drop;
+ }
} else {
queue = card->qdio.out_qs[txq];
}
if (!(dev->flags & IFF_BROADCAST) &&
- qeth_l3_get_cast_type(skb) == RTN_BROADCAST)
+ qeth_l3_get_cast_type(skb, proto) == RTN_BROADCAST)
goto tx_drop;
- if (ipv == 4 || IS_IQD(card))
- rc = qeth_l3_xmit(card, skb, queue, ipv);
+ if (proto == htons(ETH_P_IP) || IS_IQD(card))
+ rc = qeth_l3_xmit(card, skb, queue, proto);
else
- rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
+ rc = qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
if (!rc)
return NETDEV_TX_OK;
@@ -1813,7 +1825,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- if (qeth_get_ip_version(skb) != 4)
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
@@ -1821,8 +1833,10 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
- return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
- sb_dev);
+ __be16 proto = vlan_get_protocol(skb);
+
+ return qeth_iqd_select_queue(dev, skb,
+ qeth_l3_get_cast_type(skb, proto), sb_dev);
}
static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -1971,7 +1985,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index b206e266b4e7..8b0deece9758 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
depends on PCI && INET && (IPV6 || IPV6=n)
depends on THERMAL || !THERMAL
depends on ETHERNET
+ depends on TLS || TLS=n
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index a2beee6e09f0..5988c300cc82 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
pr_err("error in devcmd2 init");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_free_wq;
}
/*
@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
- goto err_free_wq;
+ goto err_disable_wq;
vdev->devcmd2->result =
(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2b28dd405600..e821dd32dd28 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/lcm.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
@@ -294,6 +295,7 @@ enum {
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
+ int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -393,6 +395,8 @@ struct hisi_hba {
u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
+ int *irq_map; /* v2 hw */
+
int n_phy;
spinlock_t lock;
struct semaphore sem;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index b6d4419c32f2..cf0bfac920a8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2614,6 +2614,13 @@ err_out:
return NULL;
}
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ if (hisi_hba->hw->interrupt_preinit)
+ return hisi_hba->hw->interrupt_preinit(hisi_hba);
+ return 0;
+}
+
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
@@ -2671,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ rc = hisi_sas_interrupt_preinit(hisi_hba);
+ if (rc)
+ goto err_out_ha;
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b57177b52fac..9adfdefef9ca 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
fatal_axi_int_v2_hw
};
+#define CQ0_IRQ_INDEX (96)
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct irq_affinity desc = {
+ .pre_vectors = CQ0_IRQ_INDEX,
+ .post_vectors = 16,
+ };
+ int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
+ nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+ &hisi_hba->irq_map);
+ if (nvec < 0)
+ return nvec;
+
+ shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+ return 0;
+}
+
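Aside: the bookkeeping in hisi_sas_v2_interrupt_preinit() above is plain arithmetic: with pre_vectors = 96 (CQ0_IRQ_INDEX) and post_vectors = 16, resv = 112 vectors get no affinity spreading, the minimum request is 113, the maximum 128, so between 1 and 16 CQ vectors end up with managed affinity and nr_hw_queues = nvec - resv. A tiny sketch of those numbers:

#include <stdio.h>

int main(void)
{
	int pre = 96;			/* CQ0_IRQ_INDEX: vectors before the CQs */
	int post = 16;			/* trailing vectors without affinity */
	int resv = pre + post;		/* 112 reserved */
	int minvec = resv + 1;		/* need at least one CQ vector */

	for (int nvec = minvec; nvec <= 128; nvec += 5)
		printf("nvec=%3d -> cq_nvecs=%2d\n", nvec, nvec - resv);
	return 0;			/* cq_nvecs ranges 1..16 */
}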
/*
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
- int irq, rc = 0, irq_map[128];
+ int irq, rc = 0;
int i, phy_no, fatal_no, queue_no;
- for (i = 0; i < 128; i++)
- irq_map[i] = platform_get_irq(pdev, i);
-
for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
- irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+ irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- irq = irq_map[phy_no + 72];
+ irq = hisi_hba->irq_map[phy_no + 72];
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
- irq = irq_map[fatal_no + 81];
+ irq = hisi_hba->irq_map[fatal_no + 81];
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
- for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+ for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- cq->irq_no = irq_map[queue_no + 96];
+ cq->irq_no = hisi_hba->irq_map[queue_no + 96];
rc = devm_request_threaded_irq(dev, cq->irq_no,
cq_interrupt_v2_hw,
cq_thread_v2_hw, IRQF_ONESHOT,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
- irq, rc);
+ cq->irq_no, rc);
rc = -ENOENT;
goto err_out;
}
+ cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
}
-
- hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
err_out:
return rc;
}
@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
NULL
};
+static int map_queues_v2_hw(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+ if (!mask)
+ continue;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+
+	return 0;
+}
+
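Aside: map_queues_v2_hw() above walks each hw queue's irq affinity mask and records, per CPU, which queue that CPU should submit to. A hedged sketch of the same loop with masks modeled as plain bitmaps (all values illustrative):

#include <stdio.h>

#define NCPUS 8

int main(void)
{
	/* queue q handles the CPUs whose bit is set in mask[q] */
	unsigned int mask[2] = { 0x0f, 0xf0 };
	int mq_map[NCPUS] = { 0 };

	for (int q = 0; q < 2; q++)
		for (int cpu = 0; cpu < NCPUS; cpu++)
			if (mask[q] & (1u << cpu))
				mq_map[cpu] = q;

	for (int cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d -> q%d\n", cpu, mq_map[cpu]);
	return 0;
}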
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
+ .map_queues = map_queues_v2_hw,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
+ .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 42e4d35e0d35..65f168c41d23 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1744,7 +1744,7 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
}
- vfc_cmd->correlation = cpu_to_be64(evt);
+ vfc_cmd->correlation = cpu_to_be64((u64)evt);
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
@@ -2418,7 +2418,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
evt->sync_iu = &rsp_iu;
- tmf->correlation = cpu_to_be64(evt);
+ tmf->correlation = cpu_to_be64((u64)evt);
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
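Aside: the two hunks above add an explicit (u64) cast before handing the event pointer to cpu_to_be64(), avoiding an implicit pointer-to-integer conversion inside the byte-swap helper. A hedged userspace equivalent using glibc's htobe64(); going through uintptr_t first is the portable spelling:

#include <endian.h>	/* htobe64(), glibc */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int dummy;
	void *evt = &dummy;

	/* cast via uintptr_t so no implicit pointer->integer conversion
	 * happens inside the byte-swap */
	uint64_t correlation = htobe64((uint64_t)(uintptr_t)evt);

	printf("correlation = %016llx\n", (unsigned long long)correlation);
	return 0;
}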
@@ -3007,8 +3007,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d71afae6191c..841000445b9a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1623,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+		FC_EXCH_DBG(ep, "ep is completed already, "
+				"hence skip calling the resp\n");
+ goto skip_resp;
+ }
}
/*
@@ -1643,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
+skip_resp:
fc_exch_release(ep);
return;
rel:
@@ -1899,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
fc_exch_hold(ep);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+		FC_EXCH_DBG(ep, "ep is completed already, "
+				"hence skip calling the resp\n");
+ goto skip_resp;
+ }
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6e4bf05c6d77..63a4f48bdc75 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable. Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ if (shost->nr_hw_queues == 1)
+ return 0;
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ instance->pdev, instance->low_latency_index_start);
+}
+
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
+ .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
+	/* Use the shared host tagset only for fusion adapters when managed
+	 * interrupts are in use (SMP affinity enabled). kdump runs with a
+	 * single MSI-X vector, so the shared host tagset is disabled there
+	 * as well.
+	 */
+
+ host->host_tagset = 0;
+ host->nr_hw_queues = 1;
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ (instance->msix_vectors > instance->low_latency_index_start) &&
+ host_tagset_enable &&
+ instance->smp_affinity_enable) {
+ host->host_tagset = 1;
+ host->nr_hw_queues = instance->msix_vectors -
+ instance->low_latency_index_start;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+ instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
@@ -8205,11 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
goto out;
}
+ /* always store 64 bits regardless of addressing */
sense_ptr = (void *)cmd->frame + ioc->sense_off;
- if (instance->consistent_mask_64bit)
- put_unaligned_le64(sense_handle, sense_ptr);
- else
- put_unaligned_le32(sense_handle, sense_ptr);
+ put_unaligned_le64(sense_handle, sense_ptr);
}
/*
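Aside: the hunk above drops the 32/64-bit branch and always stores the sense handle with put_unaligned_le64(). That is safe because, in a little-endian layout, the low 32 bits occupy the first four bytes at the same offset, so a consumer expecting a 32-bit handle still reads the same value (assuming the handle fits in 32 bits, the high bytes are simply zero). A hedged sketch with a hand-rolled little-endian store and an illustrative frame offset:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void put_le64(uint64_t v, void *p)
{
	uint8_t b[8];

	for (int i = 0; i < 8; i++)
		b[i] = v >> (8 * i);
	memcpy(p, b, 8);	/* no alignment assumption */
}

int main(void)
{
	uint8_t frame[16] = { 0 };

	put_le64(0x11223344u, frame + 4);	/* sense_off = 4, illustrative */
	printf("low word: %02x %02x %02x %02x\n",
	       frame[4], frame[5], frame[6], frame[7]);	/* 44 33 22 11 */
	return 0;
}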
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b0c01cf0428f..fd607287608e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
- /* nr_hw_queue = 1 for MegaRAID */
- struct blk_mq_hw_ctx *hctx =
- scmd->device->request_queue->queue_hw_ctx[0];
-
- sdev_busy = atomic_read(&hctx->nr_active);
+	/* TBD - if the SCSI midlayer removes device_busy in the future,
+	 * the driver should track the counter in an internal structure.
+	 */
+ sdev_busy = atomic_read(&scmd->device->device_busy);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
+ } else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- else
+ } else if (instance->host->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+ instance->low_latency_index_start;
+ } else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
+ }
}
/**
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
- dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
- instance->max_fw_cmds);
-
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+	dev_info(&instance->pdev->dev, "Performance mode: %s (latency index = %d)\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+ instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index 86209455172d..c299f7e078fb 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -79,5 +79,5 @@ config SCSI_MPT2SAS
select SCSI_MPT3SAS
depends on PCI && SCSI
help
- Dummy config option for backwards compatiblity: configure the MPT3SAS
+ Dummy config option for backwards compatibility: configure the MPT3SAS
driver instead.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 969baf4cd3f5..6e23dc3209fe 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5034,7 +5034,7 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
static void
_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
- u16 trigger_flags;
+ int trigger_flags;
/*
* Default setting of master trigger.
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index f5fc7f518f8a..47ad64b06623 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2245,7 +2245,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
@@ -2253,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 24c0f7ec0351..4a08c450b756 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6740,7 +6740,7 @@ static int __init scsi_debug_init(void)
k = sdeb_zbc_model_str(sdeb_zbc_model_s);
if (k < 0) {
ret = k;
- goto free_vm;
+ goto free_q_arr;
}
sdeb_zbc_model = k;
switch (sdeb_zbc_model) {
@@ -6753,7 +6753,8 @@ static int __init scsi_debug_init(void)
break;
default:
pr_err("Invalid ZBC model\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_q_arr;
}
}
if (sdeb_zbc_model != BLK_ZONED_NONE) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4848ae3c7b56..b3f14f05340a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -249,7 +249,8 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -1206,6 +1207,8 @@ static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
+ case SDEV_CREATED:
+ return BLK_STS_OK;
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
/*
@@ -1232,18 +1235,18 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
return BLK_STS_RESOURCE;
case SDEV_QUIESCE:
/*
- * If the devices is blocked we defer normal commands.
+ * If the device is blocked we only accept power management
+ * commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
return BLK_STS_RESOURCE;
return BLK_STS_OK;
default:
/*
* For any other not fully online state we only allow
- * special commands. In particular any user initiated
- * command is not allowed.
+ * power management commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PM))
return BLK_STS_IOERR;
return BLK_STS_OK;
}
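Aside: after the hunk above, scsi_device_state_check() gates on RQF_PM instead of RQF_PREEMPT: a quiesced device defers anything that is not a power-management request, and any other not-fully-online state rejects non-PM requests outright. A hedged sketch of that gate with the block status codes modeled as errno values:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum sdev_state { SDEV_CREATED, SDEV_RUNNING, SDEV_QUIESCE, SDEV_OFFLINE };

static int state_check(enum sdev_state state, bool rqf_pm)
{
	switch (state) {
	case SDEV_CREATED:
	case SDEV_RUNNING:
		return 0;			/* BLK_STS_OK */
	case SDEV_QUIESCE:
		return rqf_pm ? 0 : -EAGAIN;	/* defer (BLK_STS_RESOURCE) */
	default:
		return rqf_pm ? 0 : -EIO;	/* reject (BLK_STS_IOERR) */
	}
}

int main(void)
{
	printf("quiesce/non-PM: %d, quiesce/PM: %d\n",
	       state_check(SDEV_QUIESCE, false),
	       state_check(SDEV_QUIESCE, true));
	return 0;
}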
@@ -2516,15 +2519,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
- * scsi_device_quiesce - Block user issued commands.
+ * scsi_device_quiesce - Block all commands except power management.
* @sdev: scsi device to quiesce.
*
* This works by trying to transition to the SDEV_QUIESCE state
* (which must be a legal transition). When the device is in this
- * state, only special requests will be accepted, all others will
- * be deferred. Since special requests may also be requeued requests,
- * a successful return doesn't guarantee the device will be
- * totally quiescent.
+ * state, only power management requests will be accepted, all others will
+ * be deferred.
*
* Must be called with user context, may sleep.
*
@@ -2586,12 +2587,12 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
if (sdev->quiesced_by) {
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
}
- if (sdev->sdev_state == SDEV_QUIESCE)
- scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f3d5b1bbd5aa..c37dd15d16d2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
+ /*
+ * The purpose of the RQF_PM flag below is to bypass the
+ * SDEV_QUIESCE state.
+ */
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
- 0, NULL);
+ RQF_PM, NULL);
if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
*/
lock_system_sleep();
+ if (scsi_autopm_get_device(sdev))
+ goto unlock_system_sleep;
+
if (unlikely(spi_dv_in_progress(starget)))
- goto unlock;
+ goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
- goto unlock;
+ goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
- goto out_put;
+ goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */
if (unlikely(scsi_device_quiesce(sdev)))
- goto out_free;
+ goto free_buffer;
scsi_target_quiesce(starget);
@@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
spi_initial_dv(starget) = 1;
- out_free:
+free_buffer:
kfree(buffer);
- out_put:
+
+put_sdev:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
-unlock:
+put_autopm:
+ scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index cba1cf6a1c12..1e939a2a387f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state != SRP_RPORT_FAIL_FAST)
+ /*
+		 * The sdev state must be SDEV_TRANSPORT_OFFLINE here; a
+		 * transition to SDEV_BLOCK would be illegal. Calling
+		 * scsi_target_unblock() later is fine, though, since
+		 * scsi_internal_device_unblock_nowait() treats
+		 * SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+ */
+ scsi_target_block(&shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 679c2c025047..a3d2d4bc4a3d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -984,8 +984,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
}
}
- if (sdp->no_write_same)
+ if (sdp->no_write_same) {
+ rq->rq_flags |= RQF_QUIET;
return BLK_STS_TARGET;
+ }
if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
return sd_setup_write_same16_cmnd(cmd, false);
@@ -3510,10 +3512,8 @@ static int sd_probe(struct device *dev)
static int sd_remove(struct device *dev)
{
struct scsi_disk *sdkp;
- dev_t devt;
sdkp = dev_get_drvdata(dev);
- devt = disk_devt(sdkp->disk);
scsi_autopm_get_device(sdkp->device);
async_synchronize_full_domain(&scsi_sd_pm_domain);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 3f6dfed4fe84..b915b38c2b27 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
depends on SCSI_UFSHCD
+ depends on HAS_IOMEM
help
This selects the UFS host controller support. Select this if
you have an UFS controller on Platform bus.
diff --git a/drivers/scsi/ufs/ufs-mediatek-trace.h b/drivers/scsi/ufs/ufs-mediatek-trace.h
index fd6f84c1b4e2..895e82ea6ece 100644
--- a/drivers/scsi/ufs/ufs-mediatek-trace.h
+++ b/drivers/scsi/ufs/ufs-mediatek-trace.h
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
#define TRACE_INCLUDE_FILE ufs-mediatek-trace
#include <trace/define_trace.h>
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 3522458db3bb..80618af7c872 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -70,6 +70,13 @@ static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -514,6 +521,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
host->caps |= UFS_MTK_CAP_DISABLE_AH8;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
+ host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -1003,6 +1013,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ hba->vreg_info.vcc->always_on = true;
+ /*
+		 * VCC will be kept always-on, thus we don't
+		 * need any delay during regulator operations.
+ */
+ hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ }
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 93d35097dfb0..3f0d3bb769e8 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -81,6 +81,7 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
+ UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
};
struct ufs_mtk_crypt_cfg {
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index d593edb48767..14dfda735adf 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -330,7 +330,6 @@ enum {
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
-#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
/* Attribute bActiveICCLevel parameter bit masks definitions */
@@ -513,6 +512,7 @@ struct ufs_query_res {
struct ufs_vreg {
struct regulator *reg;
const char *name;
+ bool always_on;
bool enabled;
int min_uV;
int max_uV;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index df3a564c3e33..fadd566025b8 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -148,6 +148,8 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
{
struct intel_host *host;
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
@@ -163,6 +165,41 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
intel_ltr_hide(hba->dev);
}
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+ /*
+ * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
+	 * address registers must be restored because the restore kernel may
+	 * have used different addresses.
+ */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ int ret = ufshcd_uic_hibern8_exit(hba);
+
+ if (!ret) {
+ ufshcd_set_link_active(hba);
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ /*
+ * Force reset and restore. Any other actions can lead
+ * to an unrecoverable state.
+ */
+ ufshcd_set_link_off(hba);
+ }
+ }
+
+ return 0;
+}
+
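Aside: the register restore in ufs_intel_resume() above splits each 64-bit descriptor-list DMA address into a low and a high 32-bit MMIO write. A hedged sketch of that split with an array standing in for the register file (address and offsets illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];			/* stands in for MMIO */

static void writel(uint32_t v, int idx) { regs[idx] = v; }

int main(void)
{
	uint64_t utrdl = 0x0000001234567000ULL;	/* illustrative DMA address */

	writel((uint32_t)utrdl, 0);		/* ..._BASE_L */
	writel((uint32_t)(utrdl >> 32), 1);	/* ..._BASE_H */
	printf("L=%08x H=%08x\n", (unsigned)regs[0], (unsigned)regs[1]);
	return 0;
}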
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
@@ -174,6 +211,7 @@ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.init = ufs_intel_common_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
@@ -181,6 +219,7 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.init = ufs_intel_ehl_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
#ifdef CONFIG_PM_SLEEP
@@ -207,6 +246,30 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
+
+/**
+ * ufshcd_pci_poweroff - suspend-to-disk poweroff function
+ * @dev: pointer to PCI device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_poweroff(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int spm_lvl = hba->spm_lvl;
+ int ret;
+
+ /*
+ * For poweroff we need to set the UFS device to PowerDown mode.
+ * Force spm_lvl to ensure that.
+ */
+ hba->spm_lvl = 5;
+ ret = ufshcd_system_suspend(hba);
+ hba->spm_lvl = spm_lvl;
+ return ret;
+}
+
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -302,8 +365,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
- ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .freeze = ufshcd_pci_suspend,
+ .thaw = ufshcd_pci_resume,
+ .poweroff = ufshcd_pci_poweroff,
+ .restore = ufshcd_pci_resume,
+#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9902b7e3aa4a..fb32d122f2e3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -225,6 +225,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
@@ -288,7 +289,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
__func__, ret);
- ufshcd_wb_toggle_flush(hba, true);
+ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
+ ufshcd_wb_toggle_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -580,6 +582,23 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_vops_device_reset(hba);
+
+ if (!err) {
+ ufshcd_set_ufs_dev_active(hba);
+ if (ufshcd_is_wb_allowed(hba)) {
+ hba->wb_enabled = false;
+ hba->wb_buf_flush_enabled = false;
+ }
+ }
+ if (err != -EOPNOTSUPP)
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
+}
+
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
if (!us)
@@ -3665,7 +3684,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
+ "dme-enable: error code %d\n", ret);
return ret;
}
@@ -3964,7 +3983,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ret = ufshcd_host_reset_and_restore(hba);
@@ -3977,6 +3996,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: link recovery failed, err %d",
__func__, ret);
+ else
+ ufshcd_clear_ua_wluns(hba);
return ret;
}
@@ -4973,7 +4994,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+ if ((host_byte(result) != DID_OK) &&
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -5418,9 +5440,6 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
- if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
- return;
-
if (enable)
ufshcd_wb_buf_flush_enable(hba);
else
@@ -5985,6 +6004,9 @@ skip_err_handling:
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
up(&hba->eh_sem);
+
+ if (!err && needs_reset)
+ ufshcd_clear_ua_wluns(hba);
}
/**
@@ -6279,9 +6301,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}
- if (enabled_intr_status && retval == IRQ_NONE) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
- __func__, intr_status);
+ if (enabled_intr_status && retval == IRQ_NONE &&
+ !ufshcd_eh_in_progress(hba)) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+ __func__,
+ intr_status,
+ hba->ufs_stats.last_intr_status,
+ enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -6325,7 +6351,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
req->end_io_data = &wait;
free_slot = req->tag;
WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6643,19 +6672,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
- unsigned int tag;
u32 pos;
int err;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
+ u8 resp = 0xF, lun;
unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
@@ -6664,7 +6690,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lrbp->lun) {
+ if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
@@ -6925,13 +6951,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
- if (err)
- goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba, false);
+ if (!err)
+ err = ufshcd_probe_hba(hba, false);
-out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
@@ -6968,7 +6992,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
do {
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -7702,6 +7726,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
+ ufshcd_clear_ua_wluns(hba);
+
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7903,8 +7929,6 @@ out:
pm_runtime_put_sync(hba->dev);
ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
- } else {
- ufshcd_clear_ua_wluns(hba);
}
}
@@ -8045,7 +8069,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || !vreg->enabled)
+ if (!vreg || !vreg->enabled || vreg->always_on)
goto out;
ret = regulator_disable(vreg->reg);
@@ -8414,13 +8438,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- if (hba->wlun_dev_clr_ua) {
- ret = ufshcd_send_request_sense(hba, sdp);
- if (ret)
- goto out;
- /* Unit attention condition is cleared now */
- hba->wlun_dev_clr_ua = false;
- }
+ ufshcd_clear_ua_wluns(hba);
cmd[4] = pwr_mode << 4;
@@ -8441,7 +8459,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
if (!ret)
hba->curr_dev_pwr_mode = pwr_mode;
-out:
+
scsi_device_put(sdp);
hba->host->eh_noresume = 0;
return ret;
@@ -8685,6 +8703,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_wb_need_flush(hba));
}
+ flush_work(&hba->eeh_work);
+
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
if (!ufshcd_is_runtime_pm(pm_op))
/* ensure that bkops is disabled */
@@ -8697,8 +8717,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
}
- flush_work(&hba->eeh_work);
-
/*
* In the case of DeepSleep, the device is expected to remain powered
* with the link off, so do not check for bkops.
@@ -8747,7 +8765,7 @@ set_link_active:
* further below.
*/
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
WARN_ON(!ufshcd_is_link_off(hba));
}
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
@@ -8757,7 +8775,7 @@ set_link_active:
set_dev_active:
/* Can also get here needing to exit DeepSleep */
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_host_reset_and_restore(hba);
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
@@ -8767,6 +8785,7 @@ enable_gating:
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
+ ufshcd_clear_ua_wluns(hba);
ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8877,6 +8896,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
+ ufshcd_clear_ua_wluns(hba);
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8925,7 +8946,8 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
hba->curr_dev_pwr_mode) &&
(ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state))
+ hba->uic_link_state) &&
+ !hba->dev_info.b_rpm_dev_flush_capable)
goto out;
if (pm_runtime_suspended(hba->dev)) {
@@ -9353,7 +9375,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_init_crypto(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f8c2467dc014..aa9ea3552323 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1218,16 +1218,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
hba->vops->dbg_register_dump(hba);
}
-static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->device_reset) {
- int err = hba->vops->device_reset(hba);
-
- if (!err)
- ufshcd_set_ufs_dev_active(hba);
- if (err != -EOPNOTSUPP)
- ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
- }
+ if (hba->vops && hba->vops->device_reset)
+ return hba->vops->device_reset(hba);
+
+ return -EOPNOTSUPP;
}
static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index f8e070d67fa3..a14684ffe4c1 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -214,7 +214,7 @@ int __init register_intc_controller(struct intc_desc *desc)
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
d->window[k].virt = ioremap(res->start,
- resource_size(res));
+ resource_size(res));
if (!d->window[k].virt)
goto err2;
}
diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c
index 9e62ba9311f0..939915a07d99 100644
--- a/drivers/sh/intc/virq-debugfs.c
+++ b/drivers/sh/intc/virq-debugfs.c
@@ -16,7 +16,7 @@
#include <linux/debugfs.h>
#include "internals.h"
-static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+static int intc_irq_xlate_show(struct seq_file *m, void *priv)
{
int i;
@@ -37,17 +37,7 @@ static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
return 0;
}
-static int intc_irq_xlate_open(struct inode *inode, struct file *file)
-{
- return single_open(file, intc_irq_xlate_debug, inode->i_private);
-}
-
-static const struct file_operations intc_irq_xlate_fops = {
- .open = intc_irq_xlate_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
static int __init intc_irq_xlate_init(void)
{
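
For context, DEFINE_SHOW_ATTRIBUTE(name) from <linux/seq_file.h> generates exactly the name##_open wrapper and name##_fops table that the deleted lines spelled out by hand; the only requirement is a name##_show(struct seq_file *, void *) function. A hedged sketch with a hypothetical foo_stats attribute:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_stats_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "example output\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_stats);	/* emits foo_stats_open and foo_stats_fops */

static void foo_debugfs_init(struct dentry *root)
{
	debugfs_create_file("stats", 0444, root, NULL, &foo_stats_fops);
}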
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index c4472b68b7c2..698d21f50516 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -271,8 +271,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
return soc_dev;
}
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { }
+};
+
static int __init atmel_soc_device_init(void)
{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
at91_soc_init(socs);
return 0;
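
The guard above is the usual way to keep a built-in initcall from touching SoC-specific registers in a multi-platform kernel: match the root node's compatible string first and return quietly otherwise. The general shape, sketched with a hypothetical vendor compatible (note the of_node_put(), which balances of_find_node_by_path()):

#include <linux/of.h>

static const struct of_device_id myplat_allowed[] __initconst = {
	{ .compatible = "vendor,my-soc" },
	{ /* sentinel */ }
};

static int __init myplat_init(void)
{
	struct device_node *root = of_find_node_by_path("/");
	bool match = of_match_node(myplat_allowed, root) != NULL;

	of_node_put(root);
	if (!match)
		return 0;	/* not our SoC: succeed without side effects */

	/* ... actual platform setup ... */
	return 0;
}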
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 497a7e0fd027..654e9246ce6b 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -27,7 +27,7 @@
static struct gen_pool *muram_pool;
static spinlock_t cpm_muram_lock;
-static u8 __iomem *muram_vbase;
+static void __iomem *muram_vbase;
static phys_addr_t muram_pbase;
struct muram_block {
@@ -223,9 +223,9 @@ void __iomem *cpm_muram_addr(unsigned long offset)
}
EXPORT_SYMBOL(cpm_muram_addr);
-unsigned long cpm_muram_offset(void __iomem *addr)
+unsigned long cpm_muram_offset(const void __iomem *addr)
{
- return addr - (void __iomem *)muram_vbase;
+ return addr - muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);
@@ -235,6 +235,18 @@ EXPORT_SYMBOL(cpm_muram_offset);
*/
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
- return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
+ return muram_pbase + (addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
+
+/*
+ * As cpm_muram_free, but takes the virtual address rather than the
+ * muram offset.
+ */
+void cpm_muram_free_addr(const void __iomem *addr)
+{
+ if (!addr)
+ return;
+ cpm_muram_free(cpm_muram_offset(addr));
+}
+EXPORT_SYMBOL(cpm_muram_free_addr);
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index a9370f4aacca..05812f8ae734 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -13,7 +13,7 @@ config SOC_IMX8M
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
- select ARM_GIC_V3 if ARCH_MXC
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
help
If you say yes here you get support for the NXP i.MX8M family
support, it will provide the SoC info like SoC family,
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index 1217cafdfd4d..9b0766384570 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -140,12 +140,13 @@ struct litex_soc_ctrl_device {
void __iomem *base;
};
+#ifdef CONFIG_OF
static const struct of_device_id litex_soc_ctrl_of_match[] = {
{.compatible = "litex,soc-controller"},
{},
};
-
MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
static int litex_soc_ctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 809bfff3690a..62ea0c9e321b 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
- } else {
- while (hw->count < hw->len) {
- altera_spi_tx_word(hw);
- for (;;) {
- altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
- if (val & ALTERA_SPI_STATUS_RRDY_MSK)
- break;
+ return 1;
+ }
+
+ while (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
- cpu_relax();
- }
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
- altera_spi_rx_word(hw);
+ cpu_relax();
}
- spi_finalize_current_transfer(master);
+
+ altera_spi_rx_word(hw);
}
+ spi_finalize_current_transfer(master);
- return t->len;
+ return 0;
}
static irqreturn_t altera_spi_irq(int irq, void *dev)
@@ -252,7 +254,8 @@ static int altera_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Invalid number of chipselect: %hu\n",
pdata->num_chipselect);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit;
}
master->num_chipselect = pdata->num_chipselect;
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 70467b9d61ba..a3afd1b9ac56 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -115,6 +115,7 @@ struct cdns_spi {
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
+ unsigned int clk_rate;
u32 speed_hz;
const u8 *txbuf;
u8 *rxbuf;
@@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
u32 ctrl_reg, baud_rate_val;
unsigned long frequency;
- frequency = clk_get_rate(xspi->ref_clk);
+ frequency = xspi->clk_rate;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ xspi->clk_rate = clk_get_rate(xspi->ref_clk);
/* Set to default valid value */
- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ master->max_speed_hz = xspi->clk_rate / 4;
xspi->speed_hz = master->max_speed_hz;
master->bits_per_word_mask = SPI_BPW_MASK(8);
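
Caching the reference clock rate once at probe means the per-transfer clock configuration no longer has to call clk_get_rate(), which may take locks or sleep depending on the clock provider; the rate is fixed for the life of the controller anyway. A condensed sketch of the pattern (my_spi_* names are hypothetical):

#include <linux/clk.h>
#include <linux/kernel.h>

struct my_spi {
	struct clk *ref_clk;
	unsigned long clk_rate;	/* cached at probe; the rate never changes */
};

static void my_spi_cache_rate(struct my_spi *xspi)
{
	xspi->clk_rate = clk_get_rate(xspi->ref_clk);	/* probe context only */
}

static u32 my_spi_divider(struct my_spi *xspi, u32 target_hz)
{
	/* hot path: use the cached value, no clock-framework call */
	return DIV_ROUND_UP(xspi->clk_rate, target_hz);
}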
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 9494257e1c33..6d8e0a05a535 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_platform_data *pdata;
- bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
pdata = spi->dev.parent->parent->platform_data;
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
- pdata->cs_control(spi, !pol);
+ pdata->cs_control(spi, false);
}
if (value == BITBANG_CS_ACTIVE) {
@@ -134,7 +133,7 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
fsl_spi_change_mode(spi);
if (pdata->cs_control)
- pdata->cs_control(spi, pol);
+ pdata->cs_control(spi, true);
}
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 512e925d5ea4..881f645661cc 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
+ bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
- if (!time_left)
+ if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock since SPI core has a lock and we never
+ * access this from an interrupt.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+	 * The only known case where a transfer times out, then a cancel
+	 * times out, and then an abort times out is when something is
+	 * blocking our interrupt handler from running.  Avoid starting any
+	 * new transfers until that sorts itself out.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+	 * If we're here, the problem resolved itself, so there is no need
+	 * to check again on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
- mas->cs_flag = set_flag;
-
pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
spin_lock_irq(&mas->lock);
+ if (mas->cur_xfer) {
+ dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+ spin_unlock_irq(&mas->lock);
+ goto exit;
+ }
+
+ mas->cs_flag = set_flag;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
- if (!time_left)
+ if (!time_left) {
+ dev_warn(mas->dev, "Timeout setting chip select\n");
handle_fifo_timeout(spi, NULL);
+ }
+exit:
pm_runtime_put(mas->dev);
}
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
+ /* Stop the watermark IRQ if nothing to send */
+ if (!mas->cur_xfer) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
+
+ /* Clear out the FIFO and bail if nowhere to put it */
+ if (!mas->cur_xfer) {
+ for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+ readl(se->base + SE_GENI_RX_FIFOn);
+ return;
+ }
+
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 471dedf3d339..6017209c6d2f 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
- fthlv -= (fthlv % 2); /* multiple of 2 */
+ fthlv += (fthlv % 2) ? 1 : 0;
else
- fthlv -= (fthlv % 4); /* multiple of 4 */
+ fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
if (!fthlv)
fthlv = 1;
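
The fix flips the threshold alignment from rounding down to rounding up: with 8-bit words a value of 5 now becomes 8 rather than 4, so the FIFO threshold can no longer end up smaller than a packet. The same arithmetic, written as a plain round-up helper:

/* round val up to the next multiple of align (align is 2 or 4 here) */
static unsigned int round_up_to(unsigned int val, unsigned int align)
{
	return ((val + align - 1) / align) * align;
}
/* round_up_to(5, 4) == 8; the old round-down gave 5 - (5 % 4) == 4 */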
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51d7c004fbab..720ab34784c1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
+ u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
@@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
if (status)
return status;
- if (!spi->max_speed_hz ||
- spi->max_speed_hz > spi->controller->max_speed_hz)
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);
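
For reference, spi_transfer_wait() derives its completion timeout from the transfer size and clock, and the fix above substitutes a 100 kHz floor so a transfer that never set speed_hz can no longer divide by zero. The computation, restated as plain C:

/* sketch of the timeout heuristic: 8 bit-times per byte, doubled, +200 ms */
static unsigned long long xfer_timeout_ms(unsigned int len, u32 speed_hz)
{
	unsigned long long ms;

	if (!speed_hz)
		speed_hz = 100000;	/* conservative floor, avoids div-by-0 */

	ms = 8ULL * 1000ULL * len;	/* bits to send, scaled to ms */
	ms /= speed_hz;			/* nominal transfer time in ms */
	ms += ms + 200;			/* double it, plus 200 ms tolerance */
	return ms;			/* e.g. 4096 B @ 1 MHz -> 264 ms */
}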
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 859910ec8d9f..8cb4d923aeaa 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -682,6 +682,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "lwn,bk4" },
{ .compatible = "dh,dhcom-board" },
{ .compatible = "menlo,m53cpld" },
+ { .compatible = "cisco,spi-petra" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d99231c737fb..80d74cce2a01 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
v32.chanlist_len = cmd->chanlist_len;
v32.data = ptr_to_compat(cmd->data);
v32.data_len = cmd->data_len;
- return copy_to_user(cmd32, &v32, sizeof(v32));
+ if (copy_to_user(cmd32, &v32, sizeof(v32)))
+ return -EFAULT;
+ return 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */
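
The comedi fix above is an instance of a recurring pitfall: copy_to_user() returns the number of bytes left uncopied, not an errno, so passing its result straight up hands callers a positive byte count where 0 or -EFAULT was expected. The canonical shape:

#include <linux/uaccess.h>

/* copy a kernel structure out to user space, normalizing the result */
static int copy_out(void __user *dst, const void *src, size_t len)
{
	if (copy_to_user(dst, src, len))
		return -EFAULT;	/* any partial copy counts as failure */
	return 0;
}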
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index d524e92051a3..ca3d07fe7f58 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -901,19 +901,14 @@ static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
}
static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
- struct switchdev_trans *trans,
u8 state)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return dpaa2_switch_port_set_stp_state(port_priv, state);
}
static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
unsigned long flags)
{
if (flags & ~(BR_LEARNING | BR_FLOOD))
@@ -923,15 +918,11 @@ static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
}
static int dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
- struct switchdev_trans *trans,
unsigned long flags)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err = 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* Learning is enabled per switch */
err = dpaa2_switch_set_learning(port_priv->ethsw_data,
!!(flags & BR_LEARNING));
@@ -945,22 +936,21 @@ exit:
}
static int dpaa2_switch_port_attr_set(struct net_device *netdev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = dpaa2_switch_port_attr_stp_state_set(netdev, trans,
+ err = dpaa2_switch_port_attr_stp_state_set(netdev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_pre_set(netdev, trans,
+ err = dpaa2_switch_port_attr_br_flags_pre_set(netdev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_set(netdev, trans,
+ err = dpaa2_switch_port_attr_br_flags_set(netdev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
@@ -975,54 +965,49 @@ static int dpaa2_switch_port_attr_set(struct net_device *netdev,
}
static int dpaa2_switch_port_vlans_add(struct net_device *netdev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpsw_attr *attr = &ethsw->sw_attr;
- int vid, err = 0, new_vlans = 0;
-
- if (switchdev_trans_ph_prepare(trans)) {
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!port_priv->ethsw_data->vlans[vid])
- new_vlans++;
-
- /* Make sure that the VLAN is not already configured
- * on the switch port
- */
- if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER)
- return -EEXIST;
- }
+ int err = 0;
- /* Check if there is space for a new VLAN */
- err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
- &ethsw->sw_attr);
- if (err) {
- netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
- return err;
- }
- if (attr->max_vlans - attr->num_vlans < new_vlans)
- return -ENOSPC;
+ /* Make sure that the VLAN is not already configured
+ * on the switch port
+ */
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ return -EEXIST;
- return 0;
+ /* Check if there is space for a new VLAN */
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
+ return err;
}
+ if (attr->max_vlans - attr->num_vlans < 1)
+ return -ENOSPC;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!port_priv->ethsw_data->vlans[vid]) {
- /* this is a new VLAN */
- err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vid);
- if (err)
- return err;
- port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
- }
- err = dpaa2_switch_port_add_vlan(port_priv, vid, vlan->flags);
+ if (!port_priv->ethsw_data->vlans[vlan->vid]) {
+ /* this is a new VLAN */
+ err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vlan->vid);
if (err)
- break;
+ return err;
+
+ port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
}
- return err;
+ return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}
static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
@@ -1043,15 +1028,11 @@ static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc
}
static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* Check if address is already set on this port */
if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
return -EEXIST;
@@ -1070,21 +1051,18 @@ static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
}
static int dpaa2_switch_port_obj_add(struct net_device *netdev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ const struct switchdev_obj *obj)
{
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = dpaa2_switch_port_vlans_add(netdev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = dpaa2_switch_port_mdb_add(netdev,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1155,18 +1133,11 @@ static int dpaa2_switch_port_vlans_del(struct net_device *netdev,
const struct switchdev_obj_port_vlan *vlan)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int vid, err = 0;
if (netif_is_bridge_master(vlan->obj.orig_dev))
return -EOPNOTSUPP;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = dpaa2_switch_port_del_vlan(port_priv, vid);
- if (err)
- break;
- }
-
- return err;
+ return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}
static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
@@ -1216,8 +1187,7 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
{
int err;
- err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr);
port_attr_info->handled = true;
return notifier_from_errno(err);
@@ -1411,8 +1381,7 @@ static int dpaa2_switch_port_obj_event(unsigned long event,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/staging/hikey9xx/hisi-spmi-controller.c
index 861aedd5de48..0d42bc65f39b 100644
--- a/drivers/staging/hikey9xx/hisi-spmi-controller.c
+++ b/drivers/staging/hikey9xx/hisi-spmi-controller.c
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
- return -EADDRNOTAVAIL;
+ ret = -EADDRNOTAVAIL;
+ goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
- if (ret)
- dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+ if (ret) {
+ dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+ goto err_put_controller;
+ }
+
+ return 0;
+err_put_controller:
+ spmi_controller_put(ctrl);
return ret;
}
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
- kfree(ctrl);
+ spmi_controller_put(ctrl);
return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 52b9fb18c87f..b666cb23e5ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -1062,26 +1062,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
.def = 0,
};
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Ion Device Fd",
- .min = -1,
- .max = 1024,
- .step = 1,
- .def = ION_FD_UNSET
-};
-#endif
-
static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index b668a82d40ad..f5fbdbc4ffdb 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -367,7 +367,7 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx)
hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
raw_fmt->width = encoded_fmt->width;
- raw_fmt->width = encoded_fmt->width;
+ raw_fmt->height = encoded_fmt->height;
if (ctx->is_encoder)
hantro_set_fmt_out(ctx, raw_fmt);
else
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index 781c84a9b1b7..de7442d4834d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -203,7 +203,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
- if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
sram_array[i] |= BIT(0);
}
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d241349214e7..bc4bb4374313 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
+ goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_uninit_hsdma:
+ mtk_hsdma_uninit(hsdma);
return ret;
}
diff --git a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
index ab5a8627d371..f798b0c744a4 100644
--- a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
@@ -20,9 +20,9 @@ enum country_code_type_t {
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index bf1417236161..11032316c53d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -3211,9 +3211,6 @@ void rtw_cfg80211_init_wiphy(struct adapter *padapter)
rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
}
- /* init regulary domain */
- rtw_regd_init(padapter, rtw_reg_notifier);
-
/* copy mac_addr to wiphy */
memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
@@ -3328,6 +3325,9 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
*((struct adapter **)wiphy_priv(wiphy)) = padapter;
rtw_cfg80211_preinit_wiphy(padapter, wiphy);
+	/* init regulatory domain */
+ rtw_regd_init(wiphy, rtw_reg_notifier);
+
ret = wiphy_register(wiphy);
if (ret < 0) {
DBG_8192C("Couldn't register wiphy device\n");
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 578b9f734231..2833fc6901e6 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -139,15 +139,11 @@ static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
_rtw_reg_apply_flags(wiphy);
}
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
- struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
-
_rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
-
- return 0;
}
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 6b171fff007b..a5991df23581 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
- if (tcmu_cmd->se_cmd)
- tcmu_cmd->se_cmd->priv = NULL;
kfree(tcmu_cmd->dbi);
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
@@ -1174,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
mutex_lock(&udev->cmdr_lock);
- se_cmd->priv = tcmu_cmd;
if (!(se_cmd->transport_state & CMD_T_ABORTED))
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0)
tcmu_free_cmd(tcmu_cmd);
+ else
+ se_cmd->priv = tcmu_cmd;
mutex_unlock(&udev->cmdr_lock);
return scsi_ret;
}
@@ -1241,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
list_del_init(&cmd->queue_entry);
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
unqueued = true;
}
@@ -1332,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
}
done:
+ se_cmd->priv = NULL;
if (read_len_valid) {
pr_debug("read_len = %d\n", read_len);
target_complete_cmd_with_length(cmd->se_cmd,
@@ -1478,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
se_cmd = cmd->se_cmd;
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
@@ -1592,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* removed then LIO core will do the right thing and
* fail the retry.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
tcmu_free_cmd(tcmu_cmd);
continue;
@@ -1605,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd,
SAM_STAT_CHECK_CONDITION);
tcmu_free_cmd(tcmu_cmd);
@@ -2212,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
+ cmd->se_cmd->priv = NULL;
if (err_level == 1) {
/*
* Userspace was not able to start the
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 44e15d7fb2f0..66d6f1d06f21 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-struct xcopy_dev_search_info {
- const unsigned char *dev_wwn;
- struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * Return: 1 on match, 0 on no-match
+ */
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
- void *data)
+ const unsigned char *dev_wwn)
{
- struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (!se_dev->dev_attrib.emulate_3pc)
+ if (!se_dev->dev_attrib.emulate_3pc) {
+ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
+ }
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
- rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
- if (rc != 0)
- return 0;
-
- info->found_dev = se_dev;
- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
- rc = target_depend_item(&se_dev->dev_group.cg_item);
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
- pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
- rc, se_dev);
- return rc;
+ pr_debug("XCOPY: skip non-matching: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+ return 0;
}
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
- pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
- se_dev, &se_dev->dev_group);
return 1;
}
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
- struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+ const unsigned char *dev_wwn,
+ struct se_device **_found_dev,
+ struct percpu_ref **_found_lun_ref)
{
- struct xcopy_dev_search_info info;
- int ret;
-
- memset(&info, 0, sizeof(info));
- info.dev_wwn = dev_wwn;
-
- ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
- if (ret == 1) {
- *found_dev = info.found_dev;
- return 0;
- } else {
- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
- return -EINVAL;
+ struct se_dev_entry *deve;
+ struct se_node_acl *nacl;
+ struct se_lun *this_lun = NULL;
+ struct se_device *found_dev = NULL;
+
+ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+ if (!sess)
+ goto err_out;
+
+ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+ nacl = sess->se_node_acl;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_device *this_dev;
+ int rc;
+
+ this_lun = rcu_dereference(deve->se_lun);
+ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+ if (rc) {
+ if (percpu_ref_tryget_live(&this_lun->lun_ref))
+ found_dev = this_dev;
+ break;
+ }
}
+ rcu_read_unlock();
+ if (found_dev == NULL)
+ goto err_out;
+
+ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+ found_dev, &found_dev->dev_group);
+ *_found_dev = found_dev;
+ *_found_lun_ref = &this_lun->lun_ref;
+ return 0;
+err_out:
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
- &xop->dst_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->dst_tid_wwn,
+ &xop->dst_dev,
+ &xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
- &xop->src_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->src_tid_wwn,
+ &xop->src_dev,
+ &xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
- struct se_device *remote_dev;
-
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- remote_dev = xop->dst_dev;
+ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
- remote_dev = xop->src_dev;
-
- pr_debug("Calling configfs_undepend_item for"
- " remote_dev: %p remote_dev->dev_group: %p\n",
- remote_dev, &remote_dev->dev_group.cg_item);
+ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
- target_undepend_item(&remote_dev->dev_group.cg_item);
+ percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
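
Instead of configfs item dependencies, the rewrite pins the remote LUN with its percpu_ref for the duration of the copy: percpu_ref_tryget_live() under rcu_read_lock() either takes a reference on a still-live LUN or fails cleanly, and the undepend path drops it with percpu_ref_put(). The idiom in isolation, with hypothetical names:

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>

/* returns the pinned ref on success, NULL if the object is being torn down */
static struct percpu_ref *pin_object(struct percpu_ref *ref)
{
	struct percpu_ref *pinned = NULL;

	rcu_read_lock();
	if (percpu_ref_tryget_live(ref))	/* fails once the ref is dying */
		pinned = ref;
	rcu_read_unlock();

	return pinned;
}

/* when the work that needed the object finishes: */
static void unpin_object(struct percpu_ref *pinned)
{
	percpu_ref_put(pinned);
}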
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index c56a1bde9417..e5f20005179a 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -27,6 +27,7 @@ struct xcopy_op {
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct percpu_ref *remote_lun_ref;
sector_t src_lba;
sector_t dst_lba;
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index c981757ba0d4..780d7c4fd756 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
@@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- might_sleep();
+ if (need_resched())
+ cond_resched();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;
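
might_sleep() is primarily a debugging annotation (it can also reschedule under CONFIG_PREEMPT_VOLUNTARY), whereas the replacement yields only when the scheduler has actually requested it, which suits a hot RPC loop. Note that cond_resched() is already a no-op unless a reschedule is pending, so the need_resched() guard mostly documents intent. The pattern in a generic long-running loop (work_item and handle_one are hypothetical):

#include <linux/sched.h>

struct work_item;
void handle_one(struct work_item *item);	/* hypothetical payload */

static void process_many(struct work_item **items, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		handle_one(items[i]);
		if (need_resched())
			cond_resched();	/* yield the CPU when asked to */
	}
}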
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8b7f941a9bb7..b8c4159bc32d 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
if (auth && auth->reply.route_hi == sw->config.route_hi &&
auth->reply.route_lo == sw->config.route_lo) {
- tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+ tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
ret = -EIO;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 47a6e42f0d04..e15cd6b5bb99 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 3c1c5a9240a7..b3ccae932660 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o \
tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
- n_null.o ttynull.o
+ n_null.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-$(CONFIG_AUDIT) += tty_audit.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
obj-$(CONFIG_NOZOMI) += nozomi.o
+obj-$(CONFIG_NULL_TTY) += ttynull.o
obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 319d68c8a5df..219e85756171 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2081,9 +2081,6 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
return 0;
}
-extern ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
-
/**
* job_control - check job control
* @tty: tty
@@ -2105,7 +2102,7 @@ static int job_control(struct tty_struct *tty, struct file *file)
/* NOTE: not yet done after every sleep pending a thorough
check of the logic of this change. -- jlc */
/* don't stop on /dev/console */
- if (file->f_op->write == redirected_tty_write)
+ if (file->f_op->write_iter == redirected_tty_write)
return 0;
return __tty_check_change(tty, SIGTTIN);
@@ -2309,7 +2306,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
- if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+ if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 118b29912289..e0c00a1b0763 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
(val & STAT_TX_RDY(port)), 1, 10000);
}
+static void wait_for_xmite(struct uart_port *port)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+ (val & STAT_TX_EMP), 1, 10000);
+}
+
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
uart_console_write(port, s, count, mvebu_uart_console_putchar);
- wait_for_xmitr(port);
+ wait_for_xmite(port);
if (ier)
writel(ier, port->membase + UART_CTRL(port));
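
The distinction being fixed: STAT_TX_RDY only says the FIFO can accept another character, while STAT_TX_EMP says the transmitter has fully drained, which is what the console must wait for before re-enabling interrupts. Both waits are the same readl_poll_timeout_atomic() idiom with a different status bit; sketched generically (the register offset and bit are hypothetical):

#include <linux/bits.h>
#include <linux/iopoll.h>

#define MY_UART_STAT	0x0c		/* hypothetical status register */
#define MY_STAT_TX_EMP	BIT(6)		/* hypothetical "TX empty" bit */

/* spin (atomic-safe) until TX fully drains, or give up after 10 ms */
static int my_uart_wait_tx_empty(void __iomem *base)
{
	u32 val;

	return readl_poll_timeout_atomic(base + MY_UART_STAT, val,
					 val & MY_STAT_TX_EMP,
					 1, 10000);	/* 1 us poll, 10 ms max */
}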
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 1066eebe3b28..328d5a78792f 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -1000,6 +1000,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
/* Set up clock divider */
ssp->clkin_rate = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
+ ssp->port.uartclk = ssp->baud_rate * 16;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8034489337d7..48de20916ca7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -143,12 +143,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
DEFINE_MUTEX(tty_mutex);
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
@@ -438,8 +435,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
return 0;
}
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return -EIO;
}
@@ -478,7 +474,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = tty_write,
+ .write_iter = tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -491,7 +488,8 @@ static const struct file_operations tty_fops = {
static const struct file_operations console_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = redirected_tty_write,
+ .write_iter = redirected_tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -503,7 +501,7 @@ static const struct file_operations console_fops = {
static const struct file_operations hung_up_tty_fops = {
.llseek = no_llseek,
.read = hung_up_tty_read,
- .write = hung_up_tty_write,
+ .write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
.unlocked_ioctl = hung_up_tty_ioctl,
.compat_ioctl = hung_up_tty_compat_ioctl,
@@ -606,9 +604,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
- if (filp->f_op->write == redirected_tty_write)
+ if (filp->f_op->write_iter == redirected_tty_write)
cons_filp = filp;
- if (filp->f_op->write != tty_write)
+ if (filp->f_op->write_iter != tty_write)
continue;
closecount++;
__tty_fasync(-1, filp, 0); /* can't block */
@@ -901,9 +899,9 @@ static inline ssize_t do_tty_write(
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
- const char __user *buf,
- size_t count)
+ struct iov_iter *from)
{
+ size_t count = iov_iter_count(from);
ssize_t ret, written = 0;
unsigned int chunk;
@@ -955,14 +953,20 @@ static inline ssize_t do_tty_write(
size_t size = count;
if (size > chunk)
size = chunk;
+
ret = -EFAULT;
- if (copy_from_user(tty->write_buf, buf, size))
+ if (copy_from_iter(tty->write_buf, size, from) != size)
break;
+
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
+
+ /* FIXME! Have Al check this! */
+ if (ret != size)
+ iov_iter_revert(from, size-ret);
+
written += ret;
- buf += ret;
count -= ret;
if (!count)
break;
@@ -1022,9 +1026,9 @@ void tty_write_message(struct tty_struct *tty, char *msg)
* write method will not be invoked in parallel for each device.
*/
-static ssize_t tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
ssize_t ret;
@@ -1038,17 +1042,16 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
tty_err(tty, "missing write_room method\n");
ld = tty_ldisc_ref_wait(tty);
if (!ld)
- return hung_up_tty_write(file, buf, count, ppos);
+ return hung_up_tty_write(iocb, from);
if (!ld->ops->write)
ret = -EIO;
else
- ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+ ret = do_tty_write(ld->ops->write, tty, file, from);
tty_ldisc_deref(ld);
return ret;
}
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *p = NULL;
@@ -1059,11 +1062,11 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
if (p) {
ssize_t res;
- res = vfs_write(p, buf, count, &p->f_pos);
+ res = vfs_iocb_iter_write(p, iocb, iter);
fput(p);
return res;
}
- return tty_write(file, buf, count, ppos);
+ return tty_write(iocb, iter);
}
/*
@@ -2295,7 +2298,7 @@ static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (file->f_op->write == redirected_tty_write) {
+ if (file->f_op->write_iter == redirected_tty_write) {
struct file *f;
spin_lock(&redirect_lock);
f = redirect;
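
The conversion replaces the char __user * write path with ->write_iter, which receives a kiocb and an iov_iter, copies with copy_from_iter(), and rewinds partial consumption with iov_iter_revert(). A stripped-down sketch of such a handler for a hypothetical device with a fixed bounce buffer (mydev_consume is assumed, not a real API):

#include <linux/fs.h>
#include <linux/uio.h>

ssize_t mydev_consume(void *priv, const void *buf, size_t len);	/* hypothetical */

static ssize_t mydev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	char buf[256];
	size_t count = iov_iter_count(from);
	ssize_t written = 0;

	while (count) {
		size_t chunk = min(count, sizeof(buf));
		ssize_t ret;

		if (copy_from_iter(buf, chunk, from) != chunk)
			return written ? written : -EFAULT;

		ret = mydev_consume(iocb->ki_filp->private_data, buf, chunk);
		if (ret <= 0)
			return written ? written : ret;
		if (ret != chunk)
			iov_iter_revert(from, chunk - ret); /* give back the rest */

		written += ret;
		count -= ret;
	}
	return written;
}

static const struct file_operations mydev_fops = {
	.write_iter	= mydev_write_iter,
	.splice_write	= iter_file_splice_write, /* splice rides the iter path */
};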
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
index eced70ec54e1..17f05b7eb6d3 100644
--- a/drivers/tty/ttynull.c
+++ b/drivers/tty/ttynull.c
@@ -2,13 +2,6 @@
/*
* Copyright (C) 2019 Axis Communications AB
*
- * The console is useful for userspace applications which expect a console
- * device to work without modifications even when no console is available
- * or desired.
- *
- * In order to use this driver, you should redirect the console to this
- * TTY, or boot the kernel with console=ttynull.
- *
* Based on ttyprintk.c:
* Copyright (C) 2010 Samo Pogacnik
*/
@@ -66,17 +59,6 @@ static struct console ttynull_console = {
.device = ttynull_device,
};
-void __init register_ttynull_console(void)
-{
- if (!ttynull_driver)
- return;
-
- if (add_preferred_console(ttynull_console.name, 0, NULL))
- return;
-
- register_console(&ttynull_console);
-}
-
static int __init ttynull_init(void)
{
struct tty_driver *driver;
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 22a56c4dce67..7990fee03fe4 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
}
data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
- data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
if (ret)
return ret;
@@ -214,20 +218,16 @@ err:
return ret;
}
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_imx *data = dev_get_drvdata(dev);
- device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
return 0;
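The probe fix above stops casting the shared table straight into data->clks: devm_clk_bulk_get() writes the acquired clk pointers back into the array it is given, so handing it a single static table would leak state between device instances and across rebinds. A hedged sketch of the duplicate-then-use pattern, with invented names:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    static const struct clk_bulk_data demo_clks[] = {
        { .id = "bus" },
        { .id = "core" },
    };

    static int demo_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        struct clk_bulk_data *clks;
        int ret;

        /* Per-device copy: clk_bulk_get() fills in .clk for each entry. */
        clks = devm_kmemdup(dev, demo_clks, sizeof(demo_clks), GFP_KERNEL);
        if (!clks)
            return -ENOMEM;

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(demo_clks), clks);
        if (ret)
            return ret;

        return clk_bulk_prepare_enable(ARRAY_SIZE(demo_clks), clks);
    }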
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9e12152ea46b..8b7bc10b6e8b 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
/*
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f52f1bc0559f..781905745812 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/* Samsung phone in firmware update mode */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 02d0cfd23bb2..508b1c3f8b73 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification triggers a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
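The reordering in wdm_disconnect() matters because the work handlers can resubmit URBs: killing the URBs first leaves a window in which a still-running handler rearms a URB that was supposed to stay dead. The general teardown rule is to quiesce whatever can resubmit before the final kill; a minimal sketch with invented names:

    #include <linux/usb.h>
    #include <linux/workqueue.h>

    struct demo_dev {
        struct urb *urb;
        struct work_struct rx_work;
    };

    static void demo_disconnect(struct demo_dev *dev)
    {
        /* 1. After this, the resubmitting agent cannot run again ... */
        cancel_work_sync(&dev->rx_work);

        /* 2. ... so killing the URB is now final; nothing rearms it. */
        usb_kill_urb(dev->urb);
    }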
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 67cbd42421be..134dc2005ce9 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
/*
* See the description for usblp_select_alts() below for the usage
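The macro-to-function rewrite above exists because the one-byte channel value must live in DMA-capable memory: usb_control_msg() maps its data pointer for DMA, and on-stack (or vmalloc'd) buffers are not safe for that, which the old macro's `buffer` argument quietly permitted. A minimal sketch of the heap-buffer idiom, with invented names:

    #include <linux/slab.h>
    #include <linux/usb.h>

    /* Read one byte of vendor-specific state over a control transfer. */
    static int demo_read_byte(struct usb_device *udev, u8 request, u8 *out)
    {
        u8 *buf;
        int ret;

        buf = kmalloc(1, GFP_KERNEL);   /* heap memory is DMA-safe */
        if (!buf)
            return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                              USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE,
                              0, 0, buf, 1, USB_CTRL_GET_TIMEOUT);
        if (ret == 1) {
            *out = buf[0];
            ret = 0;
        } else if (ret >= 0) {
            ret = -EIO;                 /* short read */
        }

        kfree(buf);
        return ret;
    }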
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 60886a7464c3..ad5a0f405a75 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->status = status;
/*
* This function can be called in task context inside another remote
- * coverage collection section, but KCOV doesn't support that kind of
+ * coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
- if (in_serving_softirq())
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- if (in_serving_softirq())
- kcov_remote_stop();
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f95f08ca511..1b241f937d8f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -285,6 +285,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 417e05381b5d..bdf1f98dfad8 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 78cb4db8a6e4..ee44321fee38 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(r, t, &dep->started_list, list)
dwc3_gadget_move_cancelled_request(r);
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
goto out;
}
}
@@ -2083,6 +2085,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
@@ -2145,6 +2148,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
+ } else {
+ __dwc3_gadget_start(dwc);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
@@ -2319,10 +2324,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc->gadget_driver = driver;
-
- if (pm_runtime_active(dwc->dev))
- __dwc3_gadget_start(dwc);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
@@ -2348,13 +2349,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
-
- if (pm_runtime_suspended(dwc->dev))
- goto out;
-
- __dwc3_gadget_stop(dwc);
-
-out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index aa213c9815f6..f23f4c9a557e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,28 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned int count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
u32 reg;
int ret;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
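The busyloop rework above derives its poll interval from the 60 MHz ULPI clock instead of spinning flat out: one cycle is DIV_ROUND_UP(NSEC_PER_SEC, 60000000) = 17 ns, a basic register access takes about five cycles (~85 ns), and an extended-address or read access adds one cycle each. A small sketch of computing such a clock-derived delay (the rate and cycle counts here are assumptions for illustration):

    #include <linux/delay.h>
    #include <linux/kernel.h>
    #include <linux/time64.h>

    #define DEMO_CLK_RATE_HZ  60000000L
    /* One clock period, rounded up: ~17 ns at 60 MHz. */
    #define DEMO_CYCLE_NS     DIV_ROUND_UP(NSEC_PER_SEC, DEMO_CLK_RATE_HZ)

    static void demo_wait_cycles(unsigned int cycles)
    {
        /* ndelay() suits sub-microsecond waits like these. */
        ndelay(cycles * DEMO_CYCLE_NS);
    }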
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e6223089..2d152571a7de 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation that allows
grouping of several Ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index c6d455f2bb92..1a556a628971 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
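Both hunks above apply the same fix: usb_gadget_activate() and usb_gadget_deactivate() may sleep, so they cannot be called while holding a spinlock with interrupts disabled. The lock is dropped around the sleeping call and retaken before the counter is touched. A generic sketch of the pattern, with the usual caveat that any state consulted before the drop must still be valid (or be revalidated) after reacquiring:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static int demo_count;

    int might_sleep_operation(void);    /* assumed: a sleeping callee */

    int demo_first_user(void)
    {
        unsigned long flags;
        int status = 0;

        spin_lock_irqsave(&demo_lock, flags);
        if (demo_count == 0) {
            /* Drop the lock around the sleeping call ... */
            spin_unlock_irqrestore(&demo_lock, flags);
            status = might_sleep_operation();
            spin_lock_irqsave(&demo_lock, flags);
            /* ... and only update shared state after retaking it. */
        }
        if (status == 0)
            demo_count++;
        spin_unlock_irqrestore(&demo_lock, flags);

        return status;
    }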
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb97349..36ffb43f9c1a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
+
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
- return sprintf(page, "%s\n", udc_name ?: "");
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1536,7 +1543,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1576,7 +1583,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64a4112068fc..2f1eb2e81d30 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1162,6 +1162,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3633df6d7610..5d960b6603b6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
+ max_size_bw = num_channels(chmask) * ssize *
DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
/* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
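The rewritten set_ep_max_packet_size() clamps the bandwidth-derived packet size to the wire-format ceiling (1023 bytes at full speed, 1024 at high speed) rather than to whatever value happened to sit in the descriptor. A worked example under assumed parameters: stereo 16-bit audio at 48 kHz on a high-speed endpoint with bInterval = 4 gives 8000 / (1 << 3) = 1000 packets per second, so 2 channels x 2 bytes x DIV_ROUND_UP(48000, 1000) = 192 bytes, comfortably under the 1024-byte cap. A standalone sketch of the same computation:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Inputs mirror the driver's uac2 options; names are illustrative. */
    static uint16_t demo_max_packet(unsigned channels, unsigned sample_bytes,
                                    unsigned srate_hz, unsigned factor,
                                    unsigned bInterval, uint16_t ep_ceiling)
    {
        unsigned packets_per_sec = factor / (1u << (bInterval - 1));
        unsigned bw = channels * sample_bytes *
                      DIV_ROUND_UP(srate_hz, packets_per_sec);

        return bw < ep_ceiling ? bw : ep_ceiling;
    }

    int main(void)
    {
        /* High speed: factor 8000, ceiling 1024 bytes. */
        printf("%u\n", demo_max_packet(2, 2, 48000, 8000, 4, 1024)); /* 192 */
        return 0;
    }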
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 31ea76adcc0d..c019f2b0c0af 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
/* lock is held while accessing port_usb
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
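The rename above separates two quantities the old constant conflated: net->max_mtu is the L3 payload limit, while the frame on the wire also carries the Ethernet header, so the frame cap works out to 15412 + 14 = 15426 bytes. A tiny sketch of the relationship (names invented):

    #include <linux/if_ether.h>     /* ETH_HLEN == 14 */

    #define DEMO_MAX_MTU_SIZE    15412
    #define DEMO_MAX_FRAME_LEN   (DEMO_MAX_MTU_SIZE + ETH_HLEN)  /* 15426 */

net->max_mtu gets the MTU value; any buffer sizing must use the frame length.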
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 59be2d8417c9..e8033e5f0c18 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
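The one-line fix above addresses a classic error-path bug: jumping to the cleanup label without setting the status variable returns whatever the previous call left there, which on this path was 0, i.e. spurious success on an allocation failure. A compact sketch of the failure mode and the fix (helper names are invented):

    #include <linux/errno.h>

    int demo_setup(void);       /* assumed helper; returns 0 on success */
    void *demo_alloc(void);     /* assumed helper; NULL on failure */

    static int demo_bind(void)
    {
        int status;
        void *desc;

        status = demo_setup();
        if (status)
            goto fail;

        desc = demo_alloc();
        if (!desc) {
            status = -ENOMEM;   /* without this, 'status' is still 0 */
            goto fail;
        }
        return 0;

    fail:
        return status;
    }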
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1a12aab208b4..8c614bb86c66 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC
help
Some Freescale PowerPC and i.MX processors have a High Speed
Dual-Role (DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f5a7ce28aecd..a21f2224e7eb 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 0bd6b20435b8..02d8bfae58fb 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
u32 state, reg, loops;
/* Stop DMA activity */
- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index 3e88c7670b2e..fb01ff47b64c 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -17,7 +17,7 @@ if USB_BDC_UDC
comment "Platform Support"
config USB_BDC_PCI
tristate "BDC support for PCIe based platforms"
- depends on USB_PCI
+ depends on USB_PCI && BROKEN
default USB_BDC_UDC
help
Enable support for platforms which have BDC connected through PCIe, such as the Lego3 FPGA platform.
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 5b5cfeb6c14a..ea114f922ccf 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -659,8 +659,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
@@ -1530,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+ mutex_lock(&udc_lock);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (sysfs_streq(buf, "connect")) {
@@ -1544,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return n;
+ ret = n;
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
}
static DEVICE_ATTR_WO(soft_connect);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab5e978b5052..57067763b100 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
- default:
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
}
break;
case GetHubDescriptor:
@@ -2258,17 +2270,20 @@ static int dummy_hub_control(
}
fallthrough;
case USB_PORT_FEAT_RESET:
+ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+ break;
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
- dum_hcd->port_status = 0;
dum_hcd->port_status =
(USB_SS_PORT_STAT_POWER |
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_RESET);
- } else
+ } else {
dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
+ dum_hcd->port_status |= USB_PORT_STAT_RESET;
+ }
/*
* We want to reset device status. All but the
* Self powered feature
@@ -2280,19 +2295,19 @@ static int dummy_hub_control(
* interval? Is it still 50msec as for HS?
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
- fallthrough;
- default:
- if (hcd->speed == HCD_USB3) {
- if ((dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- } else
- if ((dum_hcd->port_status &
- USB_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
+ default:
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644
index 5a321992decc..000000000000
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET 0x600
-#define USBPHYCTRL_OTGBASE_OFFSET 0x8
-#define USBPHYCTRL_EVDO (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata;
- unsigned long freq;
- int ret;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mxc_ipg_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
- return PTR_ERR(mxc_ipg_clk);
- }
-
- mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mxc_ahb_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
- return PTR_ERR(mxc_ahb_clk);
- }
-
- mxc_per_clk = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mxc_per_clk)) {
- dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
- return PTR_ERR(mxc_per_clk);
- }
-
- clk_prepare_enable(mxc_ipg_clk);
- clk_prepare_enable(mxc_ahb_clk);
- clk_prepare_enable(mxc_per_clk);
-
- /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
- freq = clk_get_rate(mxc_per_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
- }
- }
-
- return 0;
-
-eclkrate:
- clk_disable_unprepare(mxc_ipg_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- unsigned int v;
- struct resource *res = platform_get_resource
- (pdev, IORESOURCE_MEM, 0);
- void __iomem *phy_regs = ioremap(res->start +
- MX35_USBPHYCTRL_OFFSET, 512);
- if (!phy_regs) {
- dev_err(&pdev->dev, "ioremap for phy address fails\n");
- ret = -EINVAL;
- goto ioremap_err;
- }
-
- v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
- writel(v | USBPHYCTRL_EVDO,
- phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
- iounmap(phy_regs);
- }
-
-
-ioremap_err:
- /* ULPI transceivers don't need usbpll */
- if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- }
-
- return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
- if (mxc_per_clk)
- clk_disable_unprepare(mxc_per_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_ipg_clk);
-}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index e358ae17d51e..1926b328b6aa 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
+ int rc;
hcd->uses_new_polling = 1;
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+ /* Wait until the HC becomes operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
up_write(&ehci_cf_port_reset_rwsem);
+
+ if (rc) {
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+ return rc;
+ }
+
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
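ehci_handshake() polls a register until the masked field reaches the expected value or a timeout expires; here it waits up to 100 ms for STS_HALT to clear, i.e. for the controller to actually leave the halted state after CF is set. The generic kernel idiom for this kind of wait is readl_poll_timeout(); a hedged sketch with an assumed status bit:

    #include <linux/bits.h>
    #include <linux/iopoll.h>

    #define DEMO_STS_HALT   BIT(12)     /* assumed halt bit */

    static int demo_wait_running(void __iomem *status_reg)
    {
        u32 val;

        /* Poll every 100 us; give up after 100 ms with -ETIMEDOUT. */
        return readl_poll_timeout(status_reg, val,
                                  !(val & DEMO_STS_HALT), 100, 100 * 1000);
    }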
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 087402aec5cb..9f9ab5ccea88 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
unlink_empty_async_suspended(ehci);
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5677b81c0915..cf0c93a90200 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2931,6 +2931,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
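The wmb() guarantees that the first three TRB fields are visible to the device before field[3] is written, because field[3] carries the cycle bit that hands ownership of the TRB to the controller; on a weakly ordered CPU the ownership bit could otherwise become visible first, letting the controller fetch a half-written TRB. The same publish-after-barrier pattern applies to any DMA descriptor ring; a sketch with invented names:

    #include <asm/barrier.h>
    #include <asm/byteorder.h>
    #include <linux/types.h>

    struct demo_desc {
        __le32 addr_lo;
        __le32 addr_hi;
        __le32 len;
        __le32 ctrl;            /* carries the ownership/cycle bit */
    };

    #define DEMO_OWN    0x1u    /* assumed ownership bit */

    static void demo_queue(struct demo_desc *d, u32 lo, u32 hi, u32 len, u32 ctrl)
    {
        d->addr_lo = cpu_to_le32(lo);
        d->addr_hi = cpu_to_le32(hi);
        d->len = cpu_to_le32(len);
        /* Publish the payload before handing ownership to the device. */
        wmb();
        d->ctrl = cpu_to_le32(ctrl | DEMO_OWN);
    }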
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 934be1686352..50bb91b6a4b8 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
enable);
if (err < 0)
break;
+
+ /*
+ * wait 500us for LFPS detector to be disabled before
+ * sending ACK
+ */
+ if (!enable)
+ usleep_range(500, 1000);
}
if (err < 0) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 91ab81c3fc79..e86940571b4c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4770,19 +4770,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
@@ -4834,19 +4834,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 73ebfa6e9715..c640f98d20c5 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index f1201d4de297..e8f06b41a503 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2c21e34235bb..3fe959104311 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1117,6 +1117,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -2057,6 +2059,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 870e9cf3d5dc..f9677a5ec31b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
US_FL_BROKEN_FUA),
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
"PNY",
"Pro Elite SSD",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 187690fd1a5b..60d375e9c3c7 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
- module will be called typec_displayport.
+ module will be called typec_nvidia.
endmenu
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebfd3113a9a8..8f77669f9cf4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -766,6 +766,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
return ret;
sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -923,6 +924,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
return ret;
sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
return 0;
}
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index cf37a59ce130..46a25b8db72e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -207,10 +207,21 @@ static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
u8 msg[2] = { };
+ int ret;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ /* Configure HPD first if HPD and IRQ arrive together */
+ if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ dp->status & DP_STATUS_IRQ_HPD &&
+ dp->status & DP_STATUS_HPD_STATE) {
+ msg[1] = PMC_USB_DP_HPD_LVL;
+ ret = pmc_usb_command(port, msg, sizeof(msg));
+ if (ret)
+ return ret;
+ }
+
if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 66cde5e5f796..3209b5ddd30c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
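The guard above closes an out-of-bounds shift: wValue arrives straight from the usbip client, and `1 << wValue` with wValue >= 32 is undefined behaviour on a 32-bit status word. Bounds-checking an untrusted shift amount before using it is the general fix; a minimal sketch:

    #include <linux/errno.h>
    #include <linux/types.h>

    static int demo_clear_feature(u32 *port_status, u16 feature)
    {
        if (feature >= 32)          /* untrusted shift count */
            return -EINVAL;

        *port_status &= ~(1u << feature);
        return 0;
    }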
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 531a00d703cd..df82b124170e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -381,7 +381,8 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
}
}
-static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+static void vhost_zerocopy_callback(struct sk_buff *skb,
+ struct ubuf_info *ubuf, bool success)
{
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
@@ -827,14 +828,15 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
msg.msg_flags &= ~MSG_MORE;
}
- /* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
- vhost_discard_vq_desc(vq, 1);
- vhost_net_enable_vq(net, vq);
- break;
- }
- if (err != len)
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
pr_debug("Truncated TX packet: len %d != %zd\n",
err, len);
done:
@@ -863,6 +865,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -895,14 +898,13 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
-
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
+ ubuf->flags = SKBFL_ZEROCOPY_FRAG;
refcount_set(&ubuf->refcnt, 1);
msg.msg_control = &ctl;
ctl.type = TUN_MSG_UBUF;
@@ -923,19 +925,21 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
msg.msg_flags &= ~MSG_MORE;
}
- /* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- vhost_net_ubuf_put(ubufs);
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
- vhost_discard_vq_desc(vq, 1);
- vhost_net_enable_vq(net, vq);
- break;
- }
- if (err != len)
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
pr_debug("Truncated TX packet: "
" len %d != %zd\n", err, len);
if (!zcopy_used)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a483cec31d5c..5e78fb719602 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -30,7 +30,12 @@
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
- VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&vsock->dev, vq);
do {
u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
- mutex_unlock(&vsock->dev.mutex);
- return -EFAULT;
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev, true))
+ goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
}
mutex_unlock(&vsock->dev.mutex);
return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
};
static struct miscdevice vhost_vsock_misc = {
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index a8030332a191..e850f79351cb 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -2060,16 +2060,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
-int xen_set_callback_via(uint64_t via)
-{
- struct xen_hvm_param a;
- a.domid = DOMID_SELF;
- a.index = HVM_PARAM_CALLBACK_IRQ;
- a.value = via;
- return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index dd911e1ff782..18f0ed8b1f93 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -132,6 +132,13 @@ static int platform_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
goto out;
}
+ /*
+ * It doesn't strictly *have* to run on CPU0 but it sure
+ * as hell better process the event channel ports delivered
+ * to CPU0.
+ */
+ irq_set_affinity(pdev->irq, cpumask_of(0));
+
callback_via = get_callback_via(pdev);
ret = xen_set_callback_via(callback_via);
if (ret) {
@@ -149,7 +156,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
ret = gnttab_init();
if (ret)
goto grant_out;
- xenbus_probe(NULL);
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b0c73c58f987..720a7b7abd46 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+ struct privcmd_mmap_resource __user *udata)
{
struct privcmd_data *data = file->private_data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct privcmd_mmap_resource kdata;
xen_pfn_t *pfns = NULL;
- struct xen_mem_acquire_resource xdata;
+ struct xen_mem_acquire_resource xdata = { };
int rc;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
+ /* Both fields must be set or unset */
+ if (!!kdata.addr != !!kdata.num)
+ return -EINVAL;
+
+ xdata.domid = kdata.dom;
+ xdata.type = kdata.type;
+ xdata.id = kdata.id;
+
+ if (!kdata.addr && !kdata.num) {
+ /* Query the size of the resource. */
+ rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+ if (rc)
+ return rc;
+ return __put_user(xdata.nr_frames, &udata->num);
+ }
+
mmap_write_lock(mm);
vma = find_vma(mm, kdata.addr);
@@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
} else
vma->vm_private_data = PRIV_VMA_LOCKED;
- memset(&xdata, 0, sizeof(xdata));
- xdata.domid = kdata.dom;
- xdata.type = kdata.type;
- xdata.id = kdata.id;
xdata.frame = kdata.idx;
xdata.nr_frames = kdata.num;
set_xen_guest_handle(xdata.frame_list, pfns);
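The `!!kdata.addr != !!kdata.num` test enforces that addr and num are either both set or both zero: double negation normalises each value to 0 or 1, so the comparison checks set-ness rather than magnitude. The all-zero case then becomes a size query, with the hypercall issued with nr_frames == 0 and the result written back through the now-typed udata pointer. A tiny sketch of the both-or-neither check:

    #include <linux/errno.h>
    #include <linux/types.h>

    static int demo_validate(u64 addr, u32 num)
    {
        /* !! maps any non-zero value to 1. */
        if (!!addr != !!num)
            return -EINVAL;
        return 0;   /* both set (mapping) or both clear (size query) */
    }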
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 2a93b7c9c159..dc1537335414 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -115,6 +115,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
int xenbus_probe_devices(struct xen_bus_type *bus);
+void xenbus_probe(void);
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index eb5151fc8efa..e5fda0256feb 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
static int xenbus_irq;
static struct task_struct *xenbus_task;
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
static irqreturn_t wake_waiting(int irq, void *unused)
{
- if (unlikely(xenstored_ready == 0)) {
- xenstored_ready = 1;
- schedule_work(&probe_work);
- }
-
wake_up(&xb_waitq);
return IRQ_HANDLED;
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 44634d970a5c..18ffd0551b54 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -683,29 +683,107 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(struct work_struct *unused)
+void xenbus_probe(void)
{
xenstored_ready = 1;
+ /*
+ * In the HVM case, xenbus_init() deferred its call to
+ * xs_init() in case callbacks were not operational yet.
+ * So do it now.
+ */
+ if (xen_store_domain_type == XS_HVM)
+ xs_init();
+
/* Notify others that xenstore is up */
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
-EXPORT_SYMBOL_GPL(xenbus_probe);
-static int __init xenbus_probe_initcall(void)
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised, before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
{
- if (!xen_domain())
- return -ENODEV;
+#ifdef CONFIG_XEN_PVHVM
+ return xen_store_domain_type == XS_HVM &&
+ !xen_have_vector_callback;
+#else
+ return false;
+#endif
+}
- if (xen_initial_domain() || xen_hvm_domain())
- return 0;
+static int xenbus_probe_thread(void *unused)
+{
+ DEFINE_WAIT(w);
- xenbus_probe(NULL);
+ /*
+ * We actually just want to wait for *any* trigger of xb_waitq,
+ * and run xenbus_probe() the moment it occurs.
+ */
+ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&xb_waitq, &w);
+
+ DPRINTK("probing");
+ xenbus_probe();
return 0;
}
+static int __init xenbus_probe_initcall(void)
+{
+ /*
+ * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+ * need to wait for the platform PCI device to come up.
+ */
+ if (xen_store_domain_type == XS_PV ||
+ (xen_store_domain_type == XS_HVM &&
+ !xs_hvm_defer_init_for_callback()))
+ xenbus_probe();
+
+ /*
+ * For XS_LOCAL, spawn a thread which will wait for xenstored
+ * or a xenstore-stubdom to be started, then probe. It will be
+ * triggered when communication starts happening, by waiting
+ * on xb_waitq.
+ */
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct task_struct *probe_task;
+
+ probe_task = kthread_run(xenbus_probe_thread, NULL,
+ "xenbus_probe");
+ if (IS_ERR(probe_task))
+ return PTR_ERR(probe_task);
+ }
+ return 0;
+}
device_initcall(xenbus_probe_initcall);
+int xen_set_callback_via(uint64_t via)
+{
+ struct xen_hvm_param a;
+ int ret;
+
+ a.domid = DOMID_SELF;
+ a.index = HVM_PARAM_CALLBACK_IRQ;
+ a.value = via;
+
+ ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+ if (ret)
+ return ret;
+
+ /*
+ * If xenbus_probe_initcall() deferred the xenbus_probe()
+ * due to the callback not functioning yet, we can do it now.
+ */
+ if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+ xenbus_probe();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
/* Set up event channel for xenstored which is run as a local process
* (this is normally used only in dom0)
*/
@@ -818,11 +896,17 @@ static int __init xenbus_init(void)
break;
}
- /* Initialize the interface to xenstore. */
- err = xs_init();
- if (err) {
- pr_warn("Error initializing xenstore comms: %i\n", err);
- goto out_error;
+ /*
+ * HVM domains may not have a functional callback yet. In that
+ * case let xs_init() be called from xenbus_probe(), which will
+ * get invoked at an appropriate time.
+ */
+ if (xen_store_domain_type != XS_HVM) {
+ err = xs_init();
+ if (err) {
+ pr_warn("Error initializing xenstore comms: %i\n", err);
+ goto out_error;
+ }
}
if ((xen_store_domain_type != XS_LOCAL) &&