author     David S. Miller <davem@davemloft.net>   2020-05-15 13:48:59 -0700
committer  David S. Miller <davem@davemloft.net>   2020-05-15 13:48:59 -0700
commit     da07f52d3caf6c24c6dbffb5500f379d819e04bd (patch)
tree       836e51994554f1aeddeebb6cb6ee568944a467e7 /drivers
parent     93d43e58683efd958a0421b932a273df74e0e008 (diff)
parent     f85c1598ddfe83f61d0656bd1d2025fa3b148b99 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Move the bpf verifier trace check into the new switch statement in
HEAD.
Resolve the overlapping changes in hinic, where bug fixes overlap
the addition of VF support.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
145 files changed, 1102 insertions(+), 639 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b4c0152e92aa..145ec0b6f20b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1994,23 +1994,31 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
 		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
 }
 
-bool acpi_ec_other_gpes_active(void)
-{
-	return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
-}
-
 bool acpi_ec_dispatch_gpe(void)
 {
 	u32 ret;
 
 	if (!first_ec)
+		return acpi_any_gpe_status_set(U32_MAX);
+
+	/*
+	 * Report wakeup if the status bit is set for any enabled GPE other
+	 * than the EC one.
+	 */
+	if (acpi_any_gpe_status_set(first_ec->gpe))
+		return true;
+
+	if (ec_no_wakeup)
 		return false;
 
+	/*
+	 * Dispatch the EC GPE in-band, but do not report wakeup in any case
+	 * to allow the caller to process events properly after that.
+	 */
 	ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
-	if (ret == ACPI_INTERRUPT_HANDLED) {
+	if (ret == ACPI_INTERRUPT_HANDLED)
 		pm_pr_dbg("EC GPE dispatched\n");
-		return true;
-	}
+
 	return false;
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e387517d3354..43411a7457cd 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -202,7 +202,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
 
 #ifdef CONFIG_PM_SLEEP
 void acpi_ec_flush_work(void);
-bool acpi_ec_other_gpes_active(void);
 bool acpi_ec_dispatch_gpe(void);
 #endif
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 4edc8a3ce40f..3850704570c0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1013,21 +1013,11 @@ static bool acpi_s2idle_wake(void)
 		if (acpi_check_wakeup_handlers())
 			return true;
 
-		/*
-		 * If the status bit is set for any enabled GPE other than the
-		 * EC one, the wakeup is regarded as a genuine one.
-		 */
-		if (acpi_ec_other_gpes_active())
+		/* Check non-EC GPE wakeups and dispatch the EC GPE. */
+		if (acpi_ec_dispatch_gpe())
 			return true;
 
 		/*
-		 * If the EC GPE status bit has not been set, the wakeup is
-		 * regarded as a spurious one.
-		 */
-		if (!acpi_ec_dispatch_gpe())
-			return false;
-
-		/*
 		 * Cancel the wakeup and process all pending events in case
 		 * there are any wakeup ones in there.
		 *
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index fe1523664816..8558b629880b 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -645,6 +645,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
 	dev->dev.release = amba_device_release;
 	dev->dev.bus = &amba_bustype;
 	dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+	dev->dev.dma_parms = &dev->dma_parms;
 	dev->res.name = dev_name(&dev->dev);
 }
diff --git a/drivers/base/component.c b/drivers/base/component.c
index e97704104784..dcfbe7251dc4 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -256,7 +256,8 @@ static int try_to_bring_up_master(struct master *master,
 	ret = master->ops->bind(master->dev);
 	if (ret < 0) {
 		devres_release_group(master->dev, NULL);
-		dev_info(master->dev, "master bind failed: %d\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_info(master->dev, "master bind failed: %d\n", ret);
 		return ret;
 	}
 
@@ -611,8 +612,9 @@ static int component_bind(struct component *component, struct master *master,
 		devres_release_group(component->dev, NULL);
 		devres_release_group(master->dev, NULL);
 
-		dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
-			dev_name(component->dev), component->ops, ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+				dev_name(component->dev), component->ops, ret);
 	}
 
 	return ret;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 139cdf7e7327..073045cb214e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2370,6 +2370,11 @@ u32 fw_devlink_get_flags(void)
 	return fw_devlink_flags;
 }
 
+static bool fw_devlink_is_permissive(void)
+{
+	return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+}
+
 /**
  * device_add - add device to device hierarchy.
  * @dev: device.
@@ -2524,7 +2529,7 @@ int device_add(struct device *dev)
 	if (fw_devlink_flags && is_fwnode_dev &&
 	    fwnode_has_op(dev->fwnode, add_links)) {
 		fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
-		if (fw_ret == -ENODEV)
+		if (fw_ret == -ENODEV && !fw_devlink_is_permissive())
 			device_link_wait_for_mandatory_supplier(dev);
 		else if (fw_ret)
 			device_link_wait_for_optional_supplier(dev);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 06ec0e851fa1..94037be7f5d7 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -224,17 +224,9 @@ static int deferred_devs_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(deferred_devs);
 
-#ifdef CONFIG_MODULES
-/*
- * In the case of modules, set the default probe timeout to
- * 30 seconds to give userland some time to load needed modules
- */
-int driver_deferred_probe_timeout = 30;
-#else
-/* In the case of !modules, no probe timeout needed */
-int driver_deferred_probe_timeout = -1;
-#endif
+int driver_deferred_probe_timeout;
 EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
+static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
 
 static int __init deferred_probe_timeout_setup(char *str)
 {
@@ -266,8 +258,8 @@ int driver_deferred_probe_check_state(struct device *dev)
 		return -ENODEV;
 	}
 
-	if (!driver_deferred_probe_timeout) {
-		dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+	if (!driver_deferred_probe_timeout && initcalls_done) {
+		dev_warn(dev, "deferred probe timeout, ignoring dependency");
 		return -ETIMEDOUT;
 	}
 
@@ -284,6 +276,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
 	list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
 		dev_info(private->device, "deferred probe pending");
+	wake_up(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
 
@@ -658,6 +651,9 @@ int driver_probe_done(void)
  */
 void wait_for_device_probe(void)
 {
+	/* wait for probe timeout */
+	wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
+
 	/* wait for the deferred probe workqueue to finish */
 	flush_work(&deferred_probe_work);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 5255550b7c34..b27d0f6c18c9 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -380,6 +380,8 @@ struct platform_object {
  */
static void setup_pdev_dma_masks(struct platform_device *pdev)
 {
+	pdev->dev.dma_parms = &pdev->dma_parms;
+
 	if (!pdev->dev.coherent_dma_mask)
 		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	if (!pdev->dev.dma_mask) {
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index b38359c480ea..eb2ab058a01d 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -812,10 +812,9 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 	if (!mhi_cntrl)
 		return -EINVAL;
 
-	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
-		return -EINVAL;
-
-	if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status)
+	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+	    !mhi_cntrl->write_reg)
 		return -EINVAL;
 
 	ret = parse_config(mhi_cntrl, config);
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 5deadfaa053a..095d95bc0e37 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -11,9 +11,6 @@
 
 extern struct bus_type mhi_bus_type;
 
-/* MHI MMIO register mapping */
-#define PCI_INVALID_READ(val) (val == U32_MAX)
-
 #define MHIREGLEN (0x0)
 #define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
 #define MHIREGLEN_MHIREGLEN_SHIFT (0)
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index eb4256b81406..97e06cc586e4 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -18,16 +18,7 @@
 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
 			      void __iomem *base, u32 offset, u32 *out)
 {
-	u32 tmp = readl(base + offset);
-
-	/* If there is any unexpected value, query the link status */
-	if (PCI_INVALID_READ(tmp) &&
-	    mhi_cntrl->link_status(mhi_cntrl))
-		return -EIO;
-
-	*out = tmp;
-
-	return 0;
+	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
 }
 
 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
@@ -49,7 +40,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
 		   u32 offset, u32 val)
 {
-	writel(val, base + offset);
+	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
 }
 
 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
@@ -294,7 +285,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
 			continue;
 		mhi_dev = mhi_alloc_device(mhi_cntrl);
-		if (!mhi_dev)
+		if (IS_ERR(mhi_dev))
 			return;
 
 		mhi_dev->dev_type = MHI_DEVICE_XFER;
@@ -336,7 +327,8 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 
 		/* Channel name is same for both UL and DL */
 		mhi_dev->chan_name = mhi_chan->name;
-		dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
+		dev_set_name(&mhi_dev->dev, "%s_%s",
+			     dev_name(mhi_cntrl->cntrl_dev),
 			     mhi_dev->chan_name);
 
 		/* Init wakeup source if available */
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 52690cb5c89c..dc83d65f7784 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -902,7 +902,11 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
 			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
 			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
 
-	return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
+	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
+	if (ret)
+		mhi_power_down(mhi_cntrl, false);
+
+	return ret;
 }
 EXPORT_SYMBOL(mhi_sync_power_up);
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 31f9f0e369b9..55b031d2c989 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -16,7 +16,7 @@
 int efi_tpm_final_log_size;
 EXPORT_SYMBOL(efi_tpm_final_log_size);
 
-static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
+static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
 {
 	struct tcg_pcr_event2_head *header;
 	int event_size, size = 0;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 5638b4e5355f..4269ea9a817e 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -531,7 +531,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
 {
 	struct pca953x_chip *chip = gpiochip_get_data(gc);
 
-	switch (config) {
+	switch (pinconf_to_config_param(config)) {
 	case PIN_CONFIG_BIAS_PULL_UP:
 	case PIN_CONFIG_BIAS_PULL_DOWN:
 		return pca953x_gpio_set_pull_up_down(chip, offset, config);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index acb99eff9939..86568154cdb3 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -368,6 +368,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
 	struct tegra_gpio_info *tgi = bank->tgi;
 	unsigned int gpio = d->hwirq;
 
+	tegra_gpio_irq_mask(d);
 	gpiochip_unlock_as_irq(&tgi->gc, gpio);
 }
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 40f2d7f69be2..182136d98b97 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1158,8 +1158,19 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
 				  struct gpioline_info *info)
 {
 	struct gpio_chip *gc = desc->gdev->chip;
+	bool ok_for_pinctrl;
 	unsigned long flags;
 
+	/*
+	 * This function takes a mutex so we must check this before taking
+	 * the spinlock.
+	 *
+	 * FIXME: find a non-racy way to retrieve this information. Maybe a
+	 * lock common to both frameworks?
+	 */
+	ok_for_pinctrl =
+		pinctrl_gpio_can_use_line(gc->base + info->line_offset);
+
 	spin_lock_irqsave(&gpio_lock, flags);
 
 	if (desc->name) {
@@ -1186,7 +1197,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
 	    test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
 	    test_bit(FLAG_EXPORT, &desc->flags) ||
 	    test_bit(FLAG_SYSFS, &desc->flags) ||
-	    !pinctrl_gpio_can_use_line(gc->base + info->line_offset))
+	    !ok_for_pinctrl)
 		info->flags |= GPIOLINE_FLAG_KERNEL;
 	if (test_bit(FLAG_IS_OUT, &desc->flags))
 		info->flags |= GPIOLINE_FLAG_IS_OUT;
@@ -1227,6 +1238,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	void __user *ip = (void __user *)arg;
 	struct gpio_desc *desc;
 	__u32 offset;
+	int hwgpio;
 
 	/* We fail any subsequent ioctl():s when the chip is gone */
 	if (!gc)
@@ -1259,13 +1271,19 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (IS_ERR(desc))
 			return PTR_ERR(desc);
 
+		hwgpio = gpio_chip_hwgpio(desc);
+
+		if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL &&
+		    test_bit(hwgpio, priv->watched_lines))
+			return -EBUSY;
+
 		gpio_desc_to_lineinfo(desc, &lineinfo);
 
 		if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
 			return -EFAULT;
 
 		if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL)
-			set_bit(gpio_chip_hwgpio(desc), priv->watched_lines);
+			set_bit(hwgpio, priv->watched_lines);
 
 		return 0;
 	} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
@@ -1280,7 +1298,12 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (IS_ERR(desc))
 			return PTR_ERR(desc);
 
-		clear_bit(gpio_chip_hwgpio(desc), priv->watched_lines);
+		hwgpio = gpio_chip_hwgpio(desc);
+
+		if (!test_bit(hwgpio, priv->watched_lines))
+			return -EBUSY;
+
+		clear_bit(hwgpio, priv->watched_lines);
 
 		return 0;
 	}
 	return -EINVAL;
@@ -5289,8 +5312,9 @@ static int __init gpiolib_dev_init(void)
 	gpiolib_initialized = true;
 
 	gpiochip_setup_devs();
 
-	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
-		WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier));
+#if IS_ENABLED(CONFIG_OF_DYNAMIC) && IS_ENABLED(CONFIG_OF_GPIO)
+	WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier));
+#endif /* CONFIG_OF_DYNAMIC && CONFIG_OF_GPIO */
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2992a49ad4a5..8ac1581a6b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -945,6 +945,7 @@ struct amdgpu_device {
 
 	/* s3/s4 mask */
 	bool                            in_suspend;
+	bool				in_hibernate;
 
 	/* record last mm index being written through WREG32*/
 	unsigned long last_mm_index;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 9dff792c9290..6a5b91d23fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1343,7 +1343,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	}
 
 	/* Free the BO*/
-	amdgpu_bo_unref(&mem->bo);
+	drm_gem_object_put_unlocked(&mem->bo->tbo.base);
 	mutex_destroy(&mem->lock);
 	kfree(mem);
 
@@ -1688,7 +1688,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-	(*mem)->bo = amdgpu_bo_ref(bo);
+	drm_gem_object_get(&bo->tbo.base);
+	(*mem)->bo = bo;
 	(*mem)->va = va;
 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f84f9e35a73b..affde2de2a0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3372,15 +3372,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 		}
 	}
 
-	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-
-	amdgpu_amdkfd_suspend(adev, !fbcon);
-
 	amdgpu_ras_suspend(adev);
 
 	r = amdgpu_device_ip_suspend_phase1(adev);
 
+	amdgpu_amdkfd_suspend(adev, !fbcon);
+
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 466bfe541e45..a735d79a717b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1181,7 +1181,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
 	struct amdgpu_device *adev = drm_dev->dev_private;
 	int r;
 
+	adev->in_hibernate = true;
 	r = amdgpu_device_suspend(drm_dev, true);
+	adev->in_hibernate = false;
 	if (r)
 		return r;
 	return amdgpu_asic_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9ae7b61f696a..25ddb482466a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	u32 cpp;
 	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-		    AMDGPU_GEM_CREATE_VRAM_CLEARED |
-		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+		    AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
 	info = drm_get_format_info(adev->ddev, mode_cmd);
 	cpp = info->cpp[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f92c158d89a1..0e0daf0021b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4273,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 		/* ===  CGCG /CGLS for GFX 3D Only === */
 		gfx_v10_0_update_3d_clock_gating(adev, enable);
 		/* ===  MGCG + MGLS === */
-		/* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
+		gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
 	}
 
 	if (adev->cg_flags &
@@ -4353,11 +4353,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
-		if (!enable) {
-			amdgpu_gfx_off_ctrl(adev, false);
-			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-		} else
-			amdgpu_gfx_off_ctrl(adev, true);
+		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
 	default:
 		break;
@@ -4918,6 +4914,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
 			       ref, mask);
 }
 
+static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+					 unsigned vmid)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t value = 0;
+
+	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+}
+
 static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						  uint32_t me,
 						  uint32_t pipe,
@@ -5309,6 +5318,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+	.soft_recovery = gfx_v10_0_ring_soft_recovery,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 0c390485bc10..d2d9dce68c2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1236,6 +1236,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
 	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+	/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
 	{ 0, 0, 0, 0, 0 },
 };
 
@@ -5025,10 +5027,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
-		if (!enable) {
+		if (!enable)
 			amdgpu_gfx_off_ctrl(adev, false);
-			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-		}
+
 		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -5052,12 +5053,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 			amdgpu_gfx_off_ctrl(adev, true);
 		break;
 	case CHIP_VEGA12:
-		if (!enable) {
-			amdgpu_gfx_off_ctrl(adev, false);
-			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-		} else {
-			amdgpu_gfx_off_ctrl(adev, true);
-		}
+		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 94c29b74c086..28e651b173ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -441,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 
 /**
  * dm_crtc_high_irq() - Handles CRTC interrupt
- * @interrupt_params: ignored
+ * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
 * event handler.
@@ -455,70 +455,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
 	unsigned long flags;
 
 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
-	if (acrtc) {
-		acrtc_state = to_dm_crtc_state(acrtc->base.state);
-
-		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
-			      acrtc->crtc_id,
-			      amdgpu_dm_vrr_active(acrtc_state));
-
-		/* Core vblank handling at start of front-porch is only possible
-		 * in non-vrr mode, as only there vblank timestamping will give
-		 * valid results while done in front-porch. Otherwise defer it
-		 * to dm_vupdate_high_irq after end of front-porch.
-		 */
-		if (!amdgpu_dm_vrr_active(acrtc_state))
-			drm_crtc_handle_vblank(&acrtc->base);
-
-		/* Following stuff must happen at start of vblank, for crc
-		 * computation and below-the-range btr support in vrr mode.
-		 */
-		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
-		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
-		    acrtc_state->vrr_params.supported &&
-		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-			spin_lock_irqsave(&adev->ddev->event_lock, flags);
-			mod_freesync_handle_v_update(
-				adev->dm.freesync_module,
-				acrtc_state->stream,
-				&acrtc_state->vrr_params);
-
-			dc_stream_adjust_vmin_vmax(
-				adev->dm.dc,
-				acrtc_state->stream,
-				&acrtc_state->vrr_params.adjust);
-			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-		}
-	}
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-/**
- * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
- * @interrupt params - interrupt parameters
- *
- * Notify DRM's vblank event handler at VSTARTUP
- *
- * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
- * * We are close enough to VUPDATE - the point of no return for hw
- * * We are in the fixed portion of variable front porch when vrr is enabled
- * * We are before VUPDATE, where double-buffered vrr registers are swapped
- *
- * It is therefore the correct place to signal vblank, send user flip events,
- * and update VRR.
- */
-static void dm_dcn_crtc_high_irq(void *interrupt_params)
-{
-	struct common_irq_params *irq_params = interrupt_params;
-	struct amdgpu_device *adev = irq_params->adev;
-	struct amdgpu_crtc *acrtc;
-	struct dm_crtc_state *acrtc_state;
-	unsigned long flags;
-
-	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
 	if (!acrtc)
 		return;
 
@@ -528,22 +464,35 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 		      amdgpu_dm_vrr_active(acrtc_state),
 		      acrtc_state->active_planes);
 
+	/**
+	 * Core vblank handling at start of front-porch is only possible
+	 * in non-vrr mode, as only there vblank timestamping will give
+	 * valid results while done in front-porch. Otherwise defer it
+	 * to dm_vupdate_high_irq after end of front-porch.
+	 */
+	if (!amdgpu_dm_vrr_active(acrtc_state))
+		drm_crtc_handle_vblank(&acrtc->base);
+
+	/**
+	 * Following stuff must happen at start of vblank, for crc
+	 * computation and below-the-range btr support in vrr mode.
+	 */
 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-	drm_crtc_handle_vblank(&acrtc->base);
+
+	/* BTR updates need to happen before VUPDATE on Vega and above. */
+	if (adev->family < AMDGPU_FAMILY_AI)
+		return;
 
 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
-	if (acrtc_state->vrr_params.supported &&
+	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-		mod_freesync_handle_v_update(
-			adev->dm.freesync_module,
-			acrtc_state->stream,
-			&acrtc_state->vrr_params);
+		mod_freesync_handle_v_update(adev->dm.freesync_module,
+					     acrtc_state->stream,
+					     &acrtc_state->vrr_params);
 
-		dc_stream_adjust_vmin_vmax(
-			adev->dm.dc,
-			acrtc_state->stream,
-			&acrtc_state->vrr_params.adjust);
+		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
+					   &acrtc_state->vrr_params.adjust);
 	}
 
 	/*
@@ -556,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 	 * avoid race conditions between flip programming and completion,
 	 * which could cause too early flip completion events.
	 */
-	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+	if (adev->family >= AMDGPU_FAMILY_RV &&
+	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
 	    acrtc_state->active_planes == 0) {
 		if (acrtc->event) {
 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@@ -568,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 }
-#endif
 
 static int dm_set_clockgating_state(void *handle,
 		  enum amd_clockgating_state state)
@@ -2008,17 +1957,22 @@ void amdgpu_dm_update_connector_after_detect(
 		dc_sink_retain(aconnector->dc_sink);
 		if (sink->dc_edid.length == 0) {
 			aconnector->edid = NULL;
-			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+			if (aconnector->dc_link->aux_mode) {
+				drm_dp_cec_unset_edid(
+					&aconnector->dm_dp_aux.aux);
+			}
 		} else {
 			aconnector->edid =
-				(struct edid *) sink->dc_edid.raw_edid;
-
+				(struct edid *)sink->dc_edid.raw_edid;
 			drm_connector_update_edid_property(connector,
-					aconnector->edid);
-			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
-					    aconnector->edid);
+							   aconnector->edid);
+
+			if (aconnector->dc_link->aux_mode)
+				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+						    aconnector->edid);
 		}
+
 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
 		update_connector_ext_caps(aconnector);
 	} else {
@@ -2440,8 +2394,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 		c_irq_params->adev = adev;
 		c_irq_params->irq_src = int_params.irq_source;
 
+		amdgpu_dm_irq_register_interrupt(
+			adev, &int_params, dm_crtc_high_irq, c_irq_params);
+	}
+
+	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+	 * to trigger at end of each vblank, regardless of state of the lock,
+	 * matching DCE behaviour.
+	 */
+	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+	     i++) {
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+		if (r) {
+			DRM_ERROR("Failed to add vupdate irq id!\n");
+			return r;
+		}
+
+		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+		int_params.irq_source =
+			dc_interrupt_to_irq_source(dc, i, 0);
+
+		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+		c_irq_params->adev = adev;
+		c_irq_params->irq_src = int_params.irq_source;
+
 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
-				dm_dcn_crtc_high_irq, c_irq_params);
+				dm_vupdate_high_irq, c_irq_params);
 	}
 
 	/* Use GRPH_PFLIP interrupt */
@@ -4448,10 +4430,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	int rc;
 
-	/* Do not set vupdate for DCN hardware */
-	if (adev->family > AMDGPU_FAMILY_AI)
-		return 0;
-
 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 
 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -7877,6 +7855,7 @@ static int dm_update_plane_state(struct dc *dc,
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+	struct amdgpu_crtc *new_acrtc;
 	bool needs_reset;
 	int ret = 0;
 
@@ -7886,9 +7865,30 @@ static int dm_update_plane_state(struct dc *dc,
 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
 
-	/*TODO Implement atomic check for cursor plane */
-	if (plane->type == DRM_PLANE_TYPE_CURSOR)
+	/*TODO Implement better atomic check for cursor plane */
+	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+		if (!enable || !new_plane_crtc ||
+			drm_atomic_plane_disabling(plane->state, new_plane_state))
+			return 0;
+
+		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+					 new_plane_state->crtc_w, new_plane_state->crtc_h);
+			return -EINVAL;
+		}
+
+		if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
+			new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
+			DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
+					 new_plane_state->crtc_x, new_plane_state->crtc_y);
+			return -EINVAL;
+		}
+
+		return 0;
+	}
 
 	needs_reset = should_reset_plane(state, plane, old_plane_state,
 					 new_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 78e1c11d4ae5..dcf84a61de37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -398,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
 	struct mod_hdcp_link *link = &hdcp_work[link_index].link;
 
-	memset(display, 0, sizeof(*display));
-	memset(link, 0, sizeof(*link));
-
-	display->index = aconnector->base.index;
-
 	if (config->dpms_off) {
 		hdcp_remove_display(hdcp_work, link_index, aconnector);
 		return;
 	}
+
+	memset(display, 0, sizeof(*display));
+	memset(link, 0, sizeof(*link));
+
+	display->index = aconnector->base.index;
 	display->state = MOD_HDCP_DISPLAY_ACTIVE;
 
 	if (aconnector->dc_sink != NULL)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8489f1e56892..47431ca6986d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -834,11 +834,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
 	int i;
-	int count = 0;
-	struct pipe_ctx *pipe;
 	PERF_TRACE();
 	for (i = 0; i < MAX_PIPES; i++) {
-		pipe = &context->res_ctx.pipe_ctx[i];
+		int count = 0;
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
 		if (!pipe->plane_state)
 			continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index c3535bd3965b..e4348e3b6389 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3068,25 +3068,32 @@ validate_out:
 	return out;
 }
 
-
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
-		bool fast_validate)
+/*
+ * This must be noinline to ensure anything that deals with FP registers
registers + * is contained within this call; previously our compiling with hard-float + * would result in fp instructions being emitted outside of the boundaries + * of the DC_FP_START/END macros, which makes sense as the compiler has no + * idea about what is wrapped and what is not + * + * This is largely just a workaround to avoid breakage introduced with 5.6, + * ideally all fp-using code should be moved into its own file, only that + * should be compiled with hard-float, and all code exported from there + * should be strictly wrapped with DC_FP_START/END + */ +static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc, + struct dc_state *context, bool fast_validate) { bool voltage_supported = false; bool full_pstate_supported = false; bool dummy_pstate_supported = false; double p_state_latency_us; - DC_FP_START(); p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = dc->debug.disable_dram_clock_change_vactive_support; if (fast_validate) { - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true); - - DC_FP_END(); - return voltage_supported; + return dcn20_validate_bandwidth_internal(dc, context, true); } // Best case, we support full UCLK switch latency @@ -3115,7 +3122,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, restore_dml_state: context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; + return voltage_supported; +} +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, + bool fast_validate) +{ + bool voltage_supported = false; + DC_FP_START(); + voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate); DC_FP_END(); return voltage_supported; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a38baa73d484..b8ec08e3b7a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params( min_hratio_fact_l = 1.0; min_hratio_fact_c = 1.0; - if (htaps_l <= 1) + if (hratio_l <= 1) min_hratio_fact_l = 2.0; else if (htaps_l <= 6) { if ((hratio_l * 2.0) > 4.0) @@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params( hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz; - if (htaps_c <= 1) + if (hratio_c <= 1) min_hratio_fact_c = 2.0; else if (htaps_c <= 6) { if ((hratio_c * 2.0) > 4.0) @@ -1522,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params( disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; - disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; - disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; + disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); + disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // Clamp to max for now if 
 	if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index c34eba19860a..6d7bca562eec 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -108,7 +108,7 @@
 #define ASSERT(expr) ASSERT_CRITICAL(expr)
 
 #else
-#define ASSERT(expr) WARN_ON(!(expr))
+#define ASSERT(expr) WARN_ON_ONCE(!(expr))
 #endif
 
 #define BREAK_TO_DEBUGGER() ASSERT(0)
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index e4e5a53b2b4e..8e2acb4df860 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -319,12 +319,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
 		if (*level & profile_mode_mask) {
 			hwmgr->saved_dpm_level = hwmgr->dpm_level;
 			hwmgr->en_umd_pstate = true;
-			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-						AMD_IP_BLOCK_TYPE_GFX,
-						AMD_CG_STATE_UNGATE);
 			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_GFX,
 					AMD_PG_STATE_UNGATE);
+			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+						AMD_IP_BLOCK_TYPE_GFX,
+						AMD_CG_STATE_UNGATE);
 		}
 	} else {
 		/* exit umd pstate, restore level, enable gfx cg*/
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index e8b27fab6aa1..e77046931e4c 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1476,7 +1476,7 @@ static int smu_disable_dpm(struct smu_context *smu)
 	bool use_baco = !smu->is_apu &&
 		((adev->in_gpu_reset &&
 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-		 (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
 
 	ret = smu_get_smc_version(smu, NULL, &smu_version);
 	if (ret) {
@@ -1744,12 +1744,12 @@ static int smu_enable_umd_pstate(void *handle,
 		if (*level & profile_mode_mask) {
 			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
 			smu_dpm_ctx->enable_umd_pstate = true;
-			amdgpu_device_ip_set_clockgating_state(smu->adev,
-							       AMD_IP_BLOCK_TYPE_GFX,
-							       AMD_CG_STATE_UNGATE);
 			amdgpu_device_ip_set_powergating_state(smu->adev,
 							       AMD_IP_BLOCK_TYPE_GFX,
 							       AMD_PG_STATE_UNGATE);
+			amdgpu_device_ip_set_clockgating_state(smu->adev,
+							       AMD_IP_BLOCK_TYPE_GFX,
+							       AMD_CG_STATE_UNGATE);
 		}
 	} else {
 		/* exit umd pstate, restore level, enable gfx cg*/
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 7f386adcf872..910108ccaae1 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -241,8 +241,12 @@ static int drm_hdcp_request_srm(struct drm_device *drm_dev,
 
 	ret = request_firmware_direct(&fw, (const char *)fw_name,
 				      drm_dev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		*revoked_ksv_cnt = 0;
+		*revoked_ksv_list = NULL;
+		ret = 0;
 		goto exit;
+	}
 
 	if (fw->size && fw->data)
 		ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
@@ -287,6 +291,8 @@ int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
 
 	ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
 				   &revoked_ksv_cnt);
+	if (ret)
+		return ret;
 
 	/* revoked_ksv_cnt will be zero when above function failed */
 	for (i = 0; i < revoked_ksv_cnt; i++)
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 2e5d835a9eaa..c125ca9ab9b3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -485,8 +485,7 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
 	if (!ret)
 		goto err_llb;
 	else if (ret > 1) {
-		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
+		DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
 	}
 
 	fbc->threshold = ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 0cc40e77bbd2..4f96c8788a2e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -368,7 +368,6 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 	if (!atomic_read(&obj->bind_count))
 		return;
 
@@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	assert_object_held(obj);
-
 	/* Bump the LRU to try and avoid premature eviction whilst flipping */
-	i915_gem_object_bump_inactive_ggtt(obj);
+	i915_gem_object_bump_inactive_ggtt(vma->obj);
 
 	i915_vma_unpin(vma);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 07cb83a0d017..ca0d4f4f3615 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -69,7 +69,13 @@ struct intel_context {
 #define CONTEXT_NOPREEMPT		7
 
 	u32 *lrc_reg_state;
-	u64 lrc_desc;
+	union {
+		struct {
+			u32 lrca;
+			u32 ccid;
+		};
+		u64 desc;
+	} lrc;
 	u32 tag; /* cookie passed to HW to track this context on submission */
 
 	/* Time on GPU as tracked by the hw.
	 */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b469de0dd9b6..a1aa0d3e8be1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -333,13 +333,4 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
 	return intel_engine_has_preemption(engine);
 }
 
-static inline bool
-intel_engine_has_timeslices(const struct intel_engine_cs *engine)
-{
-	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-		return false;
-
-	return intel_engine_has_semaphores(engine);
-}
-
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 3aa8a652c16d..883a9b7fe88d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1295,6 +1295,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
 		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+	if (HAS_EXECLISTS(dev_priv)) {
+		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+	}
 	drm_printf(m, "\tRING_START: 0x%08x\n",
 		   ENGINE_READ(engine, RING_START));
 	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 80cdde712842..0be674ae1cf6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -157,6 +157,20 @@ struct intel_engine_execlists {
 	struct i915_priolist default_priolist;
 
 	/**
+	 * @ccid: identifier for contexts submitted to this engine
+	 */
+	u32 ccid;
+
+	/**
+	 * @yield: CCID at the time of the last semaphore-wait interrupt.
+	 *
+	 * Instead of leaving a semaphore busy-spinning on an engine, we would
+	 * like to switch to another ready context, i.e. yielding the semaphore
+	 * timeslice.
+	 */
+	u32 yield;
+
+	/**
 	 * @error_interrupt: CS Master EIR
 	 *
 	 * The CS generates an interrupt when it detects an error. We capture
@@ -295,8 +309,7 @@ struct intel_engine_cs {
 	u32 context_size;
 	u32 mmio_base;
 
-	unsigned int context_tag;
-#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
+	unsigned long context_tag;
 
 	struct rb_node uabi_node;
@@ -483,10 +496,11 @@ struct intel_engine_cs {
 #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
 #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
-#define I915_ENGINE_IS_VIRTUAL       BIT(5)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
+#define I915_ENGINE_HAS_TIMESLICES   BIT(4)
+#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
+#define I915_ENGINE_IS_VIRTUAL       BIT(6)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
 	unsigned int flags;
 
 	/*
@@ -585,6 +599,15 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
 }
 
 static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return false;
+
+	return engine->flags & I915_ENGINE_HAS_TIMESLICES;
+}
+
+static inline bool
 intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
 {
 	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f0e7fd95165a..0cc7dd54f4f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -39,6 +39,15 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 		}
 	}
 
+	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+		WRITE_ONCE(engine->execlists.yield,
+			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+		ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+			     engine->execlists.yield);
+		if (del_timer(&engine->execlists.timer))
+			tasklet = true;
+	}
+
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
 		tasklet = true;
 
@@ -228,7 +237,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	struct intel_uncore *uncore = gt->uncore;
 	const u32 dmask = irqs << 16 | irqs;
 	const u32 smask = irqs << 16;
@@ -366,7 +376,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	const u32 gt_interrupts[] = {
 		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
 		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 683014e7bc51..2dfaddb8811e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -456,10 +456,10 @@ assert_priority_queue(const struct i915_request *prev,
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
-static u64
+static u32
 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 {
-	u64 desc;
+	u32 desc;
 
 	desc = INTEL_LEGACY_32B_CONTEXT;
 	if (i915_vm_is_4lvl(ce->vm))
@@ -470,21 +470,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 	if (IS_GEN(engine->i915, 8))
 		desc |= GEN8_CTX_L3LLC_COHERENT;
 
-	desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
-	/*
-	 * The following 32bits are copied into the OA reports (dword 2).
-	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
-	 * anything below.
-	 */
-	if (INTEL_GEN(engine->i915) >= 11) {
-		desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
-		/* bits 48-53 */
-
-		desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
-		/* bits 61-63 */
-	}
-
-	return desc;
+	return i915_ggtt_offset(ce->state) | desc;
 }
 
 static inline unsigned int dword_in_page(void *addr)
@@ -1192,7 +1178,7 @@ static void reset_active(struct i915_request *rq,
 	__execlists_update_reg_state(ce, engine, head);
 
 	/* We've switched away, so this should be a no-op, but intent matters */
-	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+	ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
 }
 
 static u32 intel_context_get_runtime(const struct intel_context *ce)
@@ -1251,18 +1237,23 @@ __execlists_schedule_in(struct i915_request *rq)
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 		execlists_check_context(ce, engine);
 
-	ce->lrc_desc &= ~GENMASK_ULL(47, 37);
 	if (ce->tag) {
 		/* Use a fixed tag for OA and friends */
-		ce->lrc_desc |= (u64)ce->tag << 32;
+		GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+		ce->lrc.ccid = ce->tag;
 	} else {
 		/* We don't need a strict matching tag, just different values */
-		ce->lrc_desc |=
-			(u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
-			GEN11_SW_CTX_ID_SHIFT;
-		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+		unsigned int tag = ffs(engine->context_tag);
+
+		GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
+		clear_bit(tag - 1, &engine->context_tag);
+		ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+		BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
 	}
 
+	ce->lrc.ccid |= engine->execlists.ccid;
+
 	__intel_gt_pm_get(engine->gt);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
 	intel_engine_context_in(engine);
@@ -1302,7 +1293,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 
 static inline void
 __execlists_schedule_out(struct i915_request *rq,
-			 struct intel_engine_cs * const engine)
+			 struct intel_engine_cs * const engine,
+			 unsigned int ccid)
 {
 	struct intel_context * const ce = rq->context;
 
@@ -1320,6 +1312,14 @@ __execlists_schedule_out(struct i915_request *rq,
 	    i915_request_completed(rq))
 		intel_engine_add_retire(engine, ce->timeline);
 
+	ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+	ccid &= GEN12_MAX_CONTEXT_HW_ID;
+	if (ccid < BITS_PER_LONG) {
+		GEM_BUG_ON(ccid == 0);
+		GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+		set_bit(ccid - 1, &engine->context_tag);
+	}
+
 	intel_context_update_runtime(ce);
 	intel_engine_context_out(engine);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
@@ -1345,15 +1345,17 @@ execlists_schedule_out(struct i915_request *rq)
 {
 	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *cur, *old;
+	u32 ccid;
 
 	trace_i915_request_out(rq);
 
+	ccid = rq->context->lrc.ccid;
 	old = READ_ONCE(ce->inflight);
 	do
 		cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
	while (!try_cmpxchg(&ce->inflight, &old, cur));
 
 	if (!cur)
-		__execlists_schedule_out(rq, old);
+		__execlists_schedule_out(rq, old, ccid);
 
 	i915_request_put(rq);
 }
@@ -1361,7 +1363,7 @@ execlists_schedule_out(struct i915_request *rq)
 static u64 execlists_update_context(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
-	u64 desc = ce->lrc_desc;
+	u64 desc = ce->lrc.desc;
 	u32 tail, prev;
 
 	/*
@@ -1400,7 +1402,7 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 */
 	wmb();
 
-	ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+	ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
 	return desc;
 }
 
@@ -1719,6 +1721,9 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
 			struct i915_request *w =
 				container_of(p->waiter, typeof(*w), sched);
 
+			if (p->flags & I915_DEPENDENCY_WEAK)
+				continue;
+
 			/* Leave semaphores spinning on the other engines */
 			if (w->engine != rq->engine)
 				continue;
@@ -1754,7 +1759,8 @@ static void defer_active(struct intel_engine_cs *engine)
 }
 
 static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+	       const struct i915_request *rq)
 {
 	int hint;
 
@@ -1768,6 +1774,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	return hint >= effective_prio(rq);
 }
 
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+		const struct i915_request *rq)
+{
+	/*
+	 * Once bitten, forever smitten!
+	 *
+	 * If the active context ever busy-waited on a semaphore,
+	 * it will be treated as a hog until the end of its timeslice (i.e.
+	 * until it is scheduled out and replaced by a new submission,
+	 * possibly even its own lite-restore). The HW only sends an interrupt
+	 * on the first miss, and we do know if that semaphore has been
+	 * signaled, or even if it is now stuck on another semaphore. Play
+	 * safe, yield if it might be stuck -- it will be given a fresh
+	 * timeslice in the near future.
+	 */
+	return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+		  const struct i915_request *rq)
+{
+	return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
 static int
 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
 {
@@ -1783,8 +1815,7 @@ timeslice(const struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 {
 	const struct intel_engine_execlists *execlists = &engine->execlists;
 	const struct i915_request *rq = *execlists->active;
@@ -1946,13 +1977,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
-			   timer_expired(&engine->execlists.timer)) {
+			   timeslice_expired(execlists, last)) {
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d\n",
+				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
 				     last->fence.context,
 				     last->fence.seqno,
 				     last->sched.attr.priority,
-				     execlists->queue_priority_hint);
+				     execlists->queue_priority_hint,
+				     yesno(timeslice_yield(execlists, last)));
 
 			ring_set_paused(engine, 1);
 			defer_active(engine);
@@ -2213,6 +2245,7 @@ done:
 		}
 		clear_ports(port + 1, last_port - port);
 
+		WRITE_ONCE(execlists->yield, -1);
 		execlists_submit_ports(engine);
 		set_preempt_timeout(engine, *active);
 	} else {
@@ -3043,7 +3076,7 @@ __execlists_context_pin(struct intel_context *ce,
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
 
-	ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
+	ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine, ce->ring->tail);
 
@@ -3072,7 +3105,7 @@ static void execlists_context_reset(struct intel_context *ce)
 				 ce, ce->engine, ce->ring, true);
 	__execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
 
-	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+	ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
 }
 
 static const struct intel_context_ops execlists_context_ops = {
@@ -3541,7 +3574,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 
 	enable_error_interrupt(engine);
 
-	engine->context_tag = 0;
+	engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
 }
 
 static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -3753,7 +3786,7 @@ out_replay:
 		     head, ce->ring->tail);
 	__execlists_reset_reg_state(ce, engine);
 	__execlists_update_reg_state(ce, engine, head);
-	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
+	ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
 
 unwind:
 	/* Push back any incomplete requests for replay after the reset.
	 */
@@ -4369,8 +4402,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
 	if (!intel_vgpu_active(engine->i915)) {
 		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
-		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
 			engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+			if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+				engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+		}
 	}
 
 	if (INTEL_GEN(engine->i915) >= 12)
@@ -4449,6 +4485,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 	engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
 }
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4516,6 +4553,11 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	else
 		execlists->csb_size = GEN11_CSB_ENTRIES;
 
+	if (INTEL_GEN(engine->i915) >= 11) {
+		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+	}
+
 	reset_csb_pointers(engine);
 
 	/* Finally, take ownership and responsibility for cleanup! */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 6f06ba750a0a..f95ae15ce865 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -929,7 +929,7 @@ create_rewinder(struct intel_context *ce,
 		goto err;
 	}
 
-	cs = intel_ring_begin(rq, 10);
+	cs = intel_ring_begin(rq, 14);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err;
 	}
@@ -941,8 +941,8 @@ create_rewinder(struct intel_context *ce,
 	*cs++ = MI_SEMAPHORE_WAIT |
 		MI_SEMAPHORE_GLOBAL_GTT |
 		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_NEQ_SDD;
-	*cs++ = 0;
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = idx;
 	*cs++ = offset;
 	*cs++ = 0;
 
@@ -951,6 +951,11 @@ create_rewinder(struct intel_context *ce,
 	*cs++ = offset + idx * sizeof(u32);
 	*cs++ = 0;
 
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = idx + 1;
+
 	intel_ring_advance(rq, cs);
 
 	rq->sched.attr.priority = I915_PRIORITY_MASK;
@@ -984,7 +989,7 @@ static int live_timeslice_rewind(void *arg)
 
 	for_each_engine(engine, gt, id) {
 		enum { A1, A2, B1 };
-		enum { X = 1, Y, Z };
+		enum { X = 1, Z, Y };
 		struct i915_request *rq[3] = {};
 		struct intel_context *ce;
 		unsigned long heartbeat;
@@ -1017,13 +1022,13 @@ static int live_timeslice_rewind(void *arg)
 			goto err;
 		}
 
-		rq[0] = create_rewinder(ce, NULL, slot, 1);
+		rq[0] = create_rewinder(ce, NULL, slot, X);
 		if (IS_ERR(rq[0])) {
 			intel_context_put(ce);
 			goto err;
 		}
 
-		rq[1] = create_rewinder(ce, NULL, slot, 2);
+		rq[1] = create_rewinder(ce, NULL, slot, Y);
 		intel_context_put(ce);
 		if (IS_ERR(rq[1]))
 			goto err;
@@ -1041,7 +1046,7 @@ static int live_timeslice_rewind(void *arg)
 			goto err;
 		}
 
-		rq[2] = create_rewinder(ce, rq[0], slot, 3);
+		rq[2] = create_rewinder(ce, rq[0], slot, Z);
 		intel_context_put(ce);
 		if (IS_ERR(rq[2]))
 			goto err;
@@ -1055,15 +1060,12 @@ static int live_timeslice_rewind(void *arg)
 		GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
 
 		/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
-		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
-		GEM_BUG_ON(!i915_request_is_active(rq[A2]));
-		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
-
-		/* Wait for the timeslice to kick in */
-		del_timer(&engine->execlists.timer);
tasklet_hi_schedule(&engine->execlists.tasklet); - intel_engine_flush_submission(engine); - + if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */ + /* Wait for the timeslice to kick in */ + del_timer(&engine->execlists.timer); + tasklet_hi_schedule(&engine->execlists.tasklet); + intel_engine_flush_submission(engine); + } /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ GEM_BUG_ON(!i915_request_is_active(rq[A1])); GEM_BUG_ON(!i915_request_is_active(rq[B1])); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index fe7778c28d2d..aa6d56e25a10 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc, static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = lower_32_bits(rq->context->lrc_desc); + u32 ctx_desc = rq->context->lrc.ccid; u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); guc_wq_item_append(guc, engine->guc_id, ctx_desc, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index a83df2f84eb9..a1696e9ce4b6 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -208,14 +208,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | SKL_FUSE_PG_DIST_STATUS(SKL_PG2); - vgpu_vreg_t(vgpu, LCPLL1_CTL) |= - LCPLL_PLL_ENABLE | - LCPLL_PLL_LOCK; - vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; - + /* + * Only 1 PIPE enabled in current vGPU display and PIPE_A is + * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, + * TRANSCODER_A can be enabled. PORT_x depends on the input of + * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x + * so we fixed to DPLL0 here. + * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode + */ + vgpu_vreg_t(vgpu, DPLL_CTRL1) = + DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, DPLL_CTRL1) |= + DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, LCPLL1_CTL) = + LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; + vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0); + /* + * Golden M/N are calculated based on: + * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), + * DP link clk 1620 MHz and non-constant_n. + * TODO: calculate DP link symbol clk and stream clk m/n. 
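The golden M/N values above encode the ratio of payload rate to link capacity. Using the parameters in the comment (154 MHz pixel clock, 24 bpp, 4 lanes, 1.62 GHz DP link, i.e. a 162 MHz symbol clock), one common formulation of the data M/N reproduces the hard-coded 0x5b425e / 0x800000 ratio. A runnable check; the formula is an assumption for illustration, not the exact i915 helper:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* From the comment: 154 MHz pixel clock, 24 bpp, 4 lanes,
	 * DP link clk 1620 MHz (162 MHz symbol clock). */
	uint64_t data_m = 154000ULL * 24;     /* payload bandwidth      */
	uint64_t data_n = 162000ULL * 4 * 8;  /* link capacity in bits  */

	printf("computed  M/N = %.5f\n", (double)data_m / (double)data_n);
	printf("hardcoded M/N = %.5f\n", (double)0x5b425e / (double)0x800000);
	return 0;
}

Both lines print 0.71296, which is why the patch can get away with fixed "golden" register values for this one virtual mode.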
+ */ + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | @@ -236,6 +263,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | @@ -256,6 +289,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index cb11c3184085..e92ed96c9b23 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -290,7 +290,7 @@ static void shadow_context_descriptor_update(struct intel_context *ce, struct intel_vgpu_workload *workload) { - u64 desc = ce->lrc_desc; + u64 desc = ce->lrc.desc; /* * Update bits 0-11 of the context descriptor which includes flags @@ -300,7 +300,7 @@ shadow_context_descriptor_update(struct intel_context *ce, desc |= (u64)workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - ce->lrc_desc = desc; + ce->lrc.desc = desc; } static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) @@ -379,7 +379,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); - + /* skip now as current i915 ppgtt alloc won't allocate + top level pdp for non 4-level table, won't impact + shadow ppgtt. 
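Each port hunk above performs the same read-modify-write on DPLL_CTRL2: ungate the DDI clock, select DPLL0 as its source, and set the override bit so the selection takes effect. The shape of that RMW, using hypothetical bit positions rather than the real register layout from i915_reg.h:

#include <stdint.h>

/* Hypothetical per-port fields: 3 control bits per port in one register. */
#define DDI_CLK_OFF(port)       (1u << ((port) * 3))
#define DDI_CLK_SEL(pll, port)  ((uint32_t)(pll) << ((port) * 3 + 1))
#define DDI_SEL_OVERRIDE(port)  (1u << ((port) * 3 + 2))

/* Ungate the port clock and pin it to DPLL0. */
static uint32_t route_port_to_dpll0(uint32_t ctrl2, unsigned int port)
{
	ctrl2 &= ~DDI_CLK_OFF(port);      /* clock no longer gated off */
	ctrl2 |= DDI_CLK_SEL(0, port);    /* field value 0 selects DPLL0 */
	ctrl2 |= DDI_SEL_OVERRIDE(port);  /* make the selection stick */
	return ctrl2;
}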
*/ + if (!pd) + break; px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 4518b9b35c3d..02ad1acd117c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -128,6 +128,13 @@ search_again: active = NULL; INIT_LIST_HEAD(&eviction_list); list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) { + if (vma == active) { /* now seen this vma twice */ + if (flags & PIN_NONBLOCK) + break; + + active = ERR_PTR(-EAGAIN); + } + /* * We keep this list in a rough least-recently scanned order * of active elements (inactive elements are cheap to reap). @@ -143,21 +150,12 @@ search_again: * To notice when we complete one full cycle, we record the * first active element seen, before moving it to the tail. */ - if (i915_vma_is_active(vma)) { - if (vma == active) { - if (flags & PIN_NONBLOCK) - break; - - active = ERR_PTR(-EAGAIN); - } - - if (active != ERR_PTR(-EAGAIN)) { - if (!active) - active = vma; + if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) { + if (!active) + active = vma; - list_move_tail(&vma->vm_link, &vm->bound_list); - continue; - } + list_move_tail(&vma->vm_link, &vm->bound_list); + continue; } if (mark_free(&scan, vma, flags, &eviction_list)) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2a4cd0ba5464..5c8e51d2ba5b 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1207,8 +1207,6 @@ static void engine_record_registers(struct intel_engine_coredump *ee) static void record_request(const struct i915_request *request, struct i915_request_coredump *erq) { - const struct i915_gem_context *ctx; - erq->flags = request->fence.flags; erq->context = request->fence.context; erq->seqno = request->fence.seqno; @@ -1219,9 +1217,13 @@ static void record_request(const struct i915_request *request, erq->pid = 0; rcu_read_lock(); - ctx = rcu_dereference(request->context->gem_context); - if (ctx) - erq->pid = pid_nr(ctx->pid); + if (!intel_context_is_closed(request->context)) { + const struct i915_gem_context *ctx; + + ctx = rcu_dereference(request->context->gem_context); + if (ctx) + erq->pid = pid_nr(ctx->pid); + } rcu_read_unlock(); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d91557d842dc..8a2b83807ffc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3361,7 +3361,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_enables; - u32 de_port_masked = GEN8_AUX_CHANNEL_A; + u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); u32 de_port_enables; u32 de_misc_masked = GEN8_DE_EDP_PSR; enum pipe pipe; @@ -3369,18 +3369,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) <= 10) de_misc_masked |= GEN8_DE_MISC_GSE; - if (INTEL_GEN(dev_priv) >= 9) { - de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | - GEN9_AUX_CHANNEL_D; - if (IS_GEN9_LP(dev_priv)) - de_port_masked |= BXT_DE_PORT_GMBUS; - } - - if (INTEL_GEN(dev_priv) >= 11) - de_port_masked |= ICL_AUX_CHANNEL_E; - - if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) - de_port_masked |= CNL_AUX_CHANNEL_F; + if (IS_GEN9_LP(dev_priv)) + de_port_masked |= BXT_DE_PORT_GMBUS; de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; diff --git 
a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 66a46e41d5ef..cf2c01f17da8 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1310,8 +1310,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) * dropped by GuC. They won't be part of the context * ID in the OA reports, so squash those lower bits. */ - stream->specific_ctx_id = - lower_32_bits(ce->lrc_desc) >> 12; + stream->specific_ctx_id = ce->lrc.lrca >> 12; /* * GuC uses the top bit to signal proxy submission, so @@ -1328,11 +1327,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); /* * Pick an unused context id - * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts + * 0 - BITS_PER_LONG are used by other contexts * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context */ stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); - BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG); break; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e0c6021fdaf9..6e12000c4b6b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3094,6 +3094,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) #define GT_BSD_USER_INTERRUPT (1 << 12) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ +#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) /* bdw+ */ #define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index c0df71d7d0ff..e2b78db685ea 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1017,11 +1017,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) GEM_BUG_ON(to == from); GEM_BUG_ON(to->timeline == from->timeline); - if (i915_request_completed(from)) + if (i915_request_completed(from)) { + i915_sw_fence_set_error_once(&to->submit, from->fence.error); return 0; + } if (to->engine->schedule) { - ret = i915_sched_node_add_dependency(&to->sched, &from->sched); + ret = i915_sched_node_add_dependency(&to->sched, + &from->sched, + I915_DEPENDENCY_EXTERNAL); if (ret < 0) return ret; } @@ -1183,7 +1187,9 @@ __i915_request_await_execution(struct i915_request *to, /* Couple the dependency tree for PI on this exposed to->fence */ if (to->engine->schedule) { - err = i915_sched_node_add_dependency(&to->sched, &from->sched); + err = i915_sched_node_add_dependency(&to->sched, + &from->sched, + I915_DEPENDENCY_WEAK); if (err < 0) return err; } diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 68b06a7ba667..f0a9e8958ca0 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -456,7 +456,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, } int i915_sched_node_add_dependency(struct i915_sched_node *node, - struct i915_sched_node *signal) + struct i915_sched_node *signal, + unsigned long flags) { struct i915_dependency *dep; @@ -465,8 +466,7 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node, return -ENOMEM; if (!__i915_sched_node_add_dependency(node, signal, dep, - I915_DEPENDENCY_EXTERNAL | - 
I915_DEPENDENCY_ALLOC)) + flags | I915_DEPENDENCY_ALLOC)) i915_dependency_free(dep); return 0; diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index d1dc4efef77b..6f0bf00fc569 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -34,7 +34,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, unsigned long flags); int i915_sched_node_add_dependency(struct i915_sched_node *node, - struct i915_sched_node *signal); + struct i915_sched_node *signal, + unsigned long flags); void i915_sched_node_fini(struct i915_sched_node *node); diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h index d18e70550054..7186875088a0 100644 --- a/drivers/gpu/drm/i915/i915_scheduler_types.h +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -78,6 +78,7 @@ struct i915_dependency { unsigned long flags; #define I915_DEPENDENCY_ALLOC BIT(0) #define I915_DEPENDENCY_EXTERNAL BIT(1) +#define I915_DEPENDENCY_WEAK BIT(2) }; #endif /* _I915_SCHEDULER_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 82e3bc280622..2cd7a7e87c0a 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1228,18 +1228,6 @@ int __i915_vma_unbind(struct i915_vma *vma) lockdep_assert_held(&vma->vm->mutex); - /* - * First wait upon any activity as retiring the request may - * have side-effects such as unpinning or even unbinding this vma. - * - * XXX Actually waiting under the vm->mutex is a hinderance and - * should be pipelined wherever possible. In cases where that is - * unavoidable, we should lift the wait to before the mutex. - */ - ret = i915_vma_sync(vma); - if (ret) - return ret; - if (i915_vma_is_pinned(vma)) { vma_print_allocator(vma, "is pinned"); return -EAGAIN; @@ -1313,15 +1301,20 @@ int i915_vma_unbind(struct i915_vma *vma) if (!drm_mm_node_allocated(&vma->node)) return 0; - if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) - /* XXX not always required: nop_clear_range */ - wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); - /* Optimistic wait before taking the mutex */ err = i915_vma_sync(vma); if (err) goto out_rpm; + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); + return -EAGAIN; + } + + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + /* XXX not always required: nop_clear_range */ + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + err = mutex_lock_interruptible(&vm->mutex); if (err) goto out_rpm; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8375054ba27d..a52986a9e7a6 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4992,7 +4992,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, * WaIncreaseLatencyIPCEnabled: kbl,cfl * Display WA #1141: kbl,cfl */ - if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) || + if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && dev_priv->ipc_enabled) latency += 4; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 58b5f40a07dd..af89c7fc8f59 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -173,7 +173,7 @@ static int igt_vma_create(void *arg) } nc = 0; - for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) { + for_each_prime_number(num_ctx, 2 * BITS_PER_LONG) { for (; nc < num_ctx; nc++) { ctx = 
mock_context(i915, "mock"); if (!ctx) diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 9dfe7cb530e1..1754c0547069 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -843,6 +843,7 @@ static const struct of_device_id ingenic_drm_of_match[] = { { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, ingenic_drm_of_match); static struct platform_driver ingenic_drm_driver = { .driver = { diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index b5f5eb7b4bb9..8c2e1b47e81a 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -412,9 +412,7 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev) if (priv->afbcd.ops) priv->afbcd.ops->init(priv); - drm_mode_config_helper_resume(priv->drm); - - return 0; + return drm_mode_config_helper_resume(priv->drm); } static int compare_of(struct device *dev, void *data) diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 059939789730..3eb89f1eb0e1 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -717,7 +717,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder) struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder); struct mipi_dsi_device *device = dsi->device; - union phy_configure_opts opts = { 0 }; + union phy_configure_opts opts = { }; struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy; u16 delay; int err; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index bd268028fb3d..583cd6e0ae27 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, static bool host1x_drm_wants_iommu(struct host1x_device *dev) { + struct host1x *host1x = dev_get_drvdata(dev->dev.parent); struct iommu_domain *domain; /* @@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) * sufficient and whether or not the host1x is attached to an IOMMU * doesn't matter. 
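The one-line ingenic fix matters for module autoloading: MODULE_DEVICE_TABLE(of, ...) emits the modalias strings udev needs to modprobe the driver when a matching devicetree node appears; without it the module only binds when loaded by hand. The pattern in isolation, with a hypothetical compatible string:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;  /* nothing to set up in this sketch */
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-lcd" },  /* hypothetical */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_of_match);  /* emit modalias aliases */

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-lcd",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");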
*/ - if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32)) + if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32)) return true; return domain != NULL; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index c1824bdf2418..7879ff58236f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -221,6 +221,7 @@ struct virtio_gpu_fpriv { /* virtio_ioctl.c */ #define DRM_VIRTIO_NUM_IOCTLS 10 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; +void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); /* virtio_kms.c */ int virtio_gpu_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 0d6152c99a27..f0d5a8974677 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -39,6 +39,9 @@ int virtio_gpu_gem_create(struct drm_file *file, int ret; u32 handle; + if (vgdev->has_virgl_3d) + virtio_gpu_create_context(dev, file); + ret = virtio_gpu_object_create(vgdev, params, &obj, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 3f60bf2fe05a..512daff92038 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -34,8 +34,7 @@ #include "virtgpu_drv.h" -static void virtio_gpu_create_context(struct drm_device *dev, - struct drm_file *file) +void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file) { struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 388bcc2889aa..d24344e91922 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -192,17 +192,55 @@ static void host1x_setup_sid_table(struct host1x *host) } } +static bool host1x_wants_iommu(struct host1x *host1x) +{ + /* + * If we support addressing a maximum of 32 bits of physical memory + * and if the host1x firewall is enabled, there's no need to enable + * IOMMU support. This can happen for example on Tegra20, Tegra30 + * and Tegra114. + * + * Tegra124 and later can address up to 34 bits of physical memory and + * many platforms come equipped with more than 2 GiB of system memory, + * which requires crossing the 4 GiB boundary. But there's a catch: on + * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can + * only address up to 32 bits of memory in GATHER opcodes, which means + * that command buffers need to either be in the first 2 GiB of system + * memory (which could quickly lead to memory exhaustion), or command + * buffers need to be treated differently from other buffers (which is + * not possible with the current ABI). + * + * A third option is to use the IOMMU in these cases to make sure all + * buffers will be mapped into a 32-bit IOVA space that host1x can + * address. This allows all of the system memory to be used and works + * within the limitations of the host1x on these SoCs. + * + * In summary, default to enable IOMMU on Tegra124 and later. For any + * of the earlier SoCs, only use the IOMMU for additional safety when + * the host1x firewall is disabled. 
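Condensed, the policy spelled out in the comment above is a two-input decision: the IOMMU is skipped only when the SoC cannot address past 32 bits anyway and the command-stream firewall already provides isolation. As a freestanding predicate mirroring the host1x_wants_iommu() logic:

#include <stdbool.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static bool wants_iommu(uint64_t dma_mask, bool firewall_enabled)
{
	/* <= 32-bit addressing plus a firewall: the IOMMU buys nothing. */
	if (dma_mask <= DMA_BIT_MASK(32) && firewall_enabled)
		return false;

	/* Wider addressing (Tegra124+) or no firewall: use the IOMMU. */
	return true;
}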
+ */ + if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) { + if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) + return false; + } + + return true; +} + static struct iommu_domain *host1x_iommu_attach(struct host1x *host) { struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); int err; /* - * If the host1x firewall is enabled, there's no need to enable IOMMU - * support. Similarly, if host1x is already attached to an IOMMU (via - * the DMA API), don't try to attach again. + * We may not always want to enable IOMMU support (for example if the + * host1x firewall is already enabled and we don't support addressing + * more than 32 bits of physical memory), so check for that first. + * + * Similarly, if host1x is already attached to an IOMMU (via the DMA + * API), don't try to attach again. */ - if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain) + if (!host1x_wants_iommu(host) || domain) return domain; host->group = iommu_group_get(host->dev); @@ -502,6 +540,19 @@ static void __exit tegra_host1x_exit(void) } module_exit(tegra_host1x_exit); +/** + * host1x_get_dma_mask() - query the supported DMA mask for host1x + * @host1x: host1x instance + * + * Note that this returns the supported DMA mask for host1x, which can be + * different from the applicable DMA mask under certain circumstances. + */ +u64 host1x_get_dma_mask(struct host1x *host1x) +{ + return host1x->info->dma_mask; +} +EXPORT_SYMBOL(host1x_get_dma_mask); + MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>"); MODULE_DESCRIPTION("Host1x driver for Tegra products"); diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index 53b517dbe7e6..4af2fc309c28 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c @@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev, int channel = to_sensor_dev_attr(devattr)->index; int ret; - mutex_lock(&hwmon->hwmon_lock); + mutex_lock(&hwmon->da9052->auxadc_lock); ret = __da9052_read_tsi(dev, channel); - mutex_unlock(&hwmon->hwmon_lock); + mutex_unlock(&hwmon->da9052->auxadc_lock); if (ret < 0) return ret; diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c index 9179460c2d9d..0d4f3d97ffc6 100644 --- a/drivers/hwmon/drivetemp.c +++ b/drivers/hwmon/drivetemp.c @@ -346,7 +346,7 @@ static int drivetemp_identify_sata(struct drivetemp_data *st) st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]); if (!have_sct_data_table) - goto skip_sct; + goto skip_sct_data; /* Request and read temperature history table */ memset(buf, '\0', sizeof(st->smartdata)); diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 1f5743d68984..a7eb10d2a053 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -41,6 +41,7 @@ #define FANCTL_MAX 4 /* Counted from 1 */ #define TCPU_MAX 8 /* Counted from 1 */ #define TEMP_MAX 4 /* Counted from 1 */ +#define SMI_STS_MAX 10 /* Counted from 1 */ #define VT_ADC_CTRL0_REG 0x20 /* Bank 0 */ #define VT_ADC_CTRL1_REG 0x21 /* Bank 0 */ @@ -361,6 +362,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel, struct nct7904_data *data = dev_get_drvdata(dev); int ret, temp; unsigned int reg1, reg2, reg3; + s8 temps; switch (attr) { case hwmon_temp_input: @@ -466,7 +468,8 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; - *val = ret * 1000; + temps = ret; + *val = temps * 1000; return 0; } @@ -1009,6 +1012,13 @@ static int 
nct7904_probe(struct i2c_client *client, data->fan_mode[i] = ret; } + /* Read all of SMI status register to clear alarms */ + for (i = 0; i < SMI_STS_MAX; i++) { + ret = nct7904_read_reg(data, BANK_0, SMI_STS1_REG + i); + if (ret < 0) + return ret; + } + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, &nct7904_chip_info, NULL); diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 717b798cddad..a670209bbce6 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1553,8 +1553,11 @@ int ib_cache_setup_one(struct ib_device *device) if (err) return err; - rdma_for_each_port (device, p) - ib_cache_update(device, p, true); + rdma_for_each_port (device, p) { + err = ib_cache_update(device, p, true); + if (err) + return err; + } return 0; } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 9eec26d10d7b..e16105be2eb2 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -1292,11 +1292,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); ret = fill_func(msg, has_cap_net_admin, res, port); - - rdma_restrack_put(res); if (ret) goto err_free; + rdma_restrack_put(res); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 177333d8bcda..bf8e149d3191 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -459,7 +459,8 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj, struct ib_uobject *uobj; struct file *filp; - if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release)) + if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release && + fd_type->fops->release != &uverbs_async_event_release)) return ERR_PTR(-EINVAL); new_fd = get_unused_fd_flags(O_CLOEXEC); diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 7df71983212d..3d189c7ee59e 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -219,6 +219,7 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue); void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file); void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue); void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res); +int uverbs_async_event_release(struct inode *inode, struct file *filp); int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs); int ib_init_ucontext(struct uverbs_attr_bundle *attrs); @@ -227,6 +228,9 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file, struct ib_ucq_object *uobj); void ib_uverbs_release_uevent(struct ib_uevent_object *uobj); void ib_uverbs_release_file(struct kref *ref); +void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file, + __u64 element, __u64 event, + struct list_head *obj_list, u32 *counter); void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 17fc25db0311..1bab8de14757 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -346,7 +346,7 @@ const struct file_operations uverbs_async_event_fops 
= { .owner = THIS_MODULE, .read = ib_uverbs_async_event_read, .poll = ib_uverbs_async_event_poll, - .release = uverbs_uobject_fd_release, + .release = uverbs_async_event_release, .fasync = ib_uverbs_async_event_fasync, .llseek = no_llseek, }; @@ -386,10 +386,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN); } -static void -ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file, - __u64 element, __u64 event, struct list_head *obj_list, - u32 *counter) +void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file, + __u64 element, __u64 event, + struct list_head *obj_list, u32 *counter) { struct ib_uverbs_event *entry; unsigned long flags; @@ -1187,9 +1186,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, */ mutex_unlock(&uverbs_dev->lists_mutex); - ib_uverbs_async_handler(READ_ONCE(file->async_file), 0, - IB_EVENT_DEVICE_FATAL, NULL, NULL); - uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE); kref_put(&file->ref, ib_uverbs_release_file); diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c index 82ec0806b34b..61899eaf1f91 100644 --- a/drivers/infiniband/core/uverbs_std_types_async_fd.c +++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c @@ -26,10 +26,38 @@ static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj, container_of(uobj, struct ib_uverbs_async_event_file, uobj); ib_unregister_event_handler(&event_file->event_handler); - ib_uverbs_free_event_queue(&event_file->ev_queue); + + if (why == RDMA_REMOVE_DRIVER_REMOVE) + ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL, + NULL, NULL); return 0; } +int uverbs_async_event_release(struct inode *inode, struct file *filp) +{ + struct ib_uverbs_async_event_file *event_file; + struct ib_uobject *uobj = filp->private_data; + int ret; + + if (!uobj) + return uverbs_uobject_fd_release(inode, filp); + + event_file = + container_of(uobj, struct ib_uverbs_async_event_file, uobj); + + /* + * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after + * disassociation, so cleaning the event list must only happen after + * release. The user knows it has reached the end of the event stream + * when it sees IB_EVENT_DEVICE_FATAL. 
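The comment above describes the fix that the hunk completes just below: pin the uobject across the generic fd release so the event queue is freed strictly after release, while the object is still guaranteed alive. A runnable miniature of that hold-across-call pattern; the helper names are simplified stand-ins, not the uverbs API:

#include <stdio.h>

struct uobject {
	int refs;
	int queued;  /* pending events */
};

static void uobject_get(struct uobject *u) { u->refs++; }

static void uobject_put(struct uobject *u)
{
	if (--u->refs == 0)
		printf("object freed\n");
}

/* The core fd release drops the file's reference on the uobject. */
static int generic_fd_release(struct uobject *u)
{
	uobject_put(u);
	return 0;
}

static void free_event_queue(struct uobject *u) { u->queued = 0; }

/* Pin the object across the core release so the queue strictly outlives it. */
static int async_event_release(struct uobject *uobj)
{
	int ret;

	uobject_get(uobj);               /* keep uobj alive past fd release */
	ret = generic_fd_release(uobj);
	free_event_queue(uobj);          /* safe: our reference still holds */
	uobject_put(uobj);               /* now the object may really go */
	return ret;
}

int main(void)
{
	struct uobject u = { .refs = 1, .queued = 3 };

	return async_event_release(&u);  /* prints "object freed" last */
}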
+ */ + uverbs_uobject_get(uobj); + ret = uverbs_uobject_fd_release(inode, filp); + ib_uverbs_free_event_queue(&event_file->ev_queue); + uverbs_uobject_put(uobj); + return ret; +} + DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_ASYNC_EVENT_ALLOC, UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE, diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index d69dece3b1d5..30e08bcc9afb 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) srqidx = ABORT_RSS_SRQIDX_G( be32_to_cpu(req->srqidx_status)); if (srqidx) { - complete_cached_srq_buffers(ep, - req->srqidx_status); + complete_cached_srq_buffers(ep, srqidx); } else { /* Hold ep ref until finish_peer_abort() */ c4iw_get_ep(&ep->com); @@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) return 0; } - ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W, - TCB_RQ_START_S); + ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M, + TCB_RQ_START_S); cleanup: pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 13e4203497b3..a92346e88628 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); pq->state = SDMA_PKT_Q_ACTIVE; - /* Send the first N packets in the request to buy us some time */ - ret = user_sdma_send_pkts(req, pcount); - if (unlikely(ret < 0 && ret != -EBUSY)) - goto free_req; /* * This is a somewhat blocking send implementation. 
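Both cxgb4 fixes above are argument slips in field extraction: one passed the raw status word where the decoded srqidx belonged, the other passed the word-index macro (TCB_RQ_START_W) where the field mask (TCB_RQ_START_M) was required. A generic helper whose parameter roles are hard to confuse, shown as an illustration rather than the driver's exact t4_tcb_get_field32():

#include <stdint.h>

/*
 * Extract a bitfield from an array of 32-bit TCB words:
 * 'word' picks the u32, then 'shift' and 'mask' decode the field.
 */
static uint32_t get_field32(const uint32_t *tcb, unsigned int word,
			    uint32_t mask, unsigned int shift)
{
	return (tcb[word] >> shift) & mask;
}

Passing a word index where the mask belongs compiles cleanly and extracts garbage, which is exactly the failure mode the hunk corrects.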
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index bb78d3280acc..fa7a5ff498c7 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, struct rtable *rt; struct neighbour *neigh; int rc = arpindex; - struct net_device *netdev = iwdev->netdev; __be32 dst_ipaddr = htonl(dst_ip); __be32 src_ipaddr = htonl(src_ip); @@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, return rc; } - if (netif_is_bond_slave(netdev)) - netdev = netdev_master_upper_dev_get(netdev); - neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr); rcu_read_lock(); @@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, { struct neighbour *neigh; int rc = arpindex; - struct net_device *netdev = iwdev->netdev; struct dst_entry *dst; struct sockaddr_in6 dst_addr; struct sockaddr_in6 src_addr; @@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, return rc; } - if (netif_is_bond_slave(netdev)) - netdev = netdev_master_upper_dev_get(netdev); - neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32); rcu_read_lock(); diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index 55a1fbf0e670..ae8b97c30665 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev, int arp_index; arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action); - if (arp_index == -1) + if (arp_index < 0) return; cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); if (!cqp_request) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 2f9f78912267..cf51e3cbd969 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, int send_size; int header_size; int spc; + int err; int i; if (wr->wr.opcode != IB_WR_SEND) @@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, sqp->ud_header.lrh.virtual_lane = 0; sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); - ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + if (err) + return err; sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); @@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr, } sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) - ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, + &pkey); else - ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, + &pkey); + if (err) + return err; + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index 48f48122ddcb..6a413d73b95d 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ 
b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) - return NULL; + return ERR_PTR(-ENOMEM); size = PAGE_ALIGN(size); diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index ff92704de32f..245040c3a35d 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, if (outbuf) { ip = rxe_create_mmap_info(rxe, buf_size, udata, buf); - if (!ip) + if (IS_ERR(ip)) { + err = PTR_ERR(ip); goto err1; + } - err = copy_to_user(outbuf, &ip->info, sizeof(ip->info)); - if (err) + if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) { + err = -EFAULT; goto err2; + } spin_lock_bh(&rxe->pending_lock); list_add(&ip->pending_mmaps, &rxe->pending_mmaps); @@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, err2: kfree(ip); err1: - return -EINVAL; + return err; } inline void rxe_queue_reset(struct rxe_queue *q) diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index a03c6d6833df..96fb9ff5ff2e 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -78,7 +78,7 @@ static struct qcom_icc_node *sdm845_osm_l3_nodes[] = { [SLAVE_OSM_L3] = &sdm845_osm_l3, }; -const static struct qcom_icc_desc sdm845_icc_osm_l3 = { +static const struct qcom_icc_desc sdm845_icc_osm_l3 = { .nodes = sdm845_osm_l3_nodes, .num_nodes = ARRAY_SIZE(sdm845_osm_l3_nodes), }; @@ -91,7 +91,7 @@ static struct qcom_icc_node *sc7180_osm_l3_nodes[] = { [SLAVE_OSM_L3] = &sc7180_osm_l3, }; -const static struct qcom_icc_desc sc7180_icc_osm_l3 = { +static const struct qcom_icc_desc sc7180_icc_osm_l3 = { .nodes = sc7180_osm_l3_nodes, .num_nodes = ARRAY_SIZE(sc7180_osm_l3_nodes), }; diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c index b013b80caa45..f6c7b969520d 100644 --- a/drivers/interconnect/qcom/sdm845.c +++ b/drivers/interconnect/qcom/sdm845.c @@ -192,7 +192,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc, }; -const static struct qcom_icc_desc sdm845_aggre1_noc = { +static const struct qcom_icc_desc sdm845_aggre1_noc = { .nodes = aggre1_noc_nodes, .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), .bcms = aggre1_noc_bcms, @@ -220,7 +220,7 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, }; -const static struct qcom_icc_desc sdm845_aggre2_noc = { +static const struct qcom_icc_desc sdm845_aggre2_noc = { .nodes = aggre2_noc_nodes, .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), .bcms = aggre2_noc_bcms, @@ -281,7 +281,7 @@ static struct qcom_icc_node *config_noc_nodes[] = { [SLAVE_SERVICE_CNOC] = &srvc_cnoc, }; -const static struct qcom_icc_desc sdm845_config_noc = { +static const struct qcom_icc_desc sdm845_config_noc = { .nodes = config_noc_nodes, .num_nodes = ARRAY_SIZE(config_noc_nodes), .bcms = config_noc_bcms, @@ -297,7 +297,7 @@ static struct qcom_icc_node *dc_noc_nodes[] = { [SLAVE_MEM_NOC_CFG] = &qhs_memnoc, }; -const static struct qcom_icc_desc sdm845_dc_noc = { +static const struct qcom_icc_desc sdm845_dc_noc = { .nodes = dc_noc_nodes, .num_nodes = ARRAY_SIZE(dc_noc_nodes), .bcms = dc_noc_bcms, @@ -315,7 +315,7 @@ static struct qcom_icc_node *gladiator_noc_nodes[] = { [SLAVE_SERVICE_GNOC] = &srvc_gnoc, }; -const static 
struct qcom_icc_desc sdm845_gladiator_noc = { +static const struct qcom_icc_desc sdm845_gladiator_noc = { .nodes = gladiator_noc_nodes, .num_nodes = ARRAY_SIZE(gladiator_noc_nodes), .bcms = gladiator_noc_bcms, @@ -350,7 +350,7 @@ static struct qcom_icc_node *mem_noc_nodes[] = { [SLAVE_EBI1] = &ebi, }; -const static struct qcom_icc_desc sdm845_mem_noc = { +static const struct qcom_icc_desc sdm845_mem_noc = { .nodes = mem_noc_nodes, .num_nodes = ARRAY_SIZE(mem_noc_nodes), .bcms = mem_noc_bcms, @@ -384,7 +384,7 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp, }; -const static struct qcom_icc_desc sdm845_mmss_noc = { +static const struct qcom_icc_desc sdm845_mmss_noc = { .nodes = mmss_noc_nodes, .num_nodes = ARRAY_SIZE(mmss_noc_nodes), .bcms = mmss_noc_bcms, @@ -430,7 +430,7 @@ static struct qcom_icc_node *system_noc_nodes[] = { [SLAVE_TCU] = &xs_sys_tcu_cfg, }; -const static struct qcom_icc_desc sdm845_system_noc = { +static const struct qcom_icc_desc sdm845_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 20cce366e951..1dc3718560d0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -101,6 +101,8 @@ struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); +static void update_and_flush_device_table(struct protection_domain *domain, + struct domain_pgtable *pgtable); /**************************************************************************** * @@ -151,6 +153,26 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom) return container_of(dom, struct protection_domain, domain); } +static void amd_iommu_domain_get_pgtable(struct protection_domain *domain, + struct domain_pgtable *pgtable) +{ + u64 pt_root = atomic64_read(&domain->pt_root); + + pgtable->root = (u64 *)(pt_root & PAGE_MASK); + pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */ +} + +static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode) +{ + u64 pt_root; + + /* lowest 3 bits encode pgtable mode */ + pt_root = mode & 7; + pt_root |= (u64)root; + + return pt_root; +} + static struct iommu_dev_data *alloc_dev_data(u16 devid) { struct iommu_dev_data *dev_data; @@ -1397,13 +1419,18 @@ static struct page *free_sub_pt(unsigned long root, int mode, static void free_pagetable(struct protection_domain *domain) { - unsigned long root = (unsigned long)domain->pt_root; + struct domain_pgtable pgtable; struct page *freelist = NULL; + unsigned long root; - BUG_ON(domain->mode < PAGE_MODE_NONE || - domain->mode > PAGE_MODE_6_LEVEL); + amd_iommu_domain_get_pgtable(domain, &pgtable); + atomic64_set(&domain->pt_root, 0); - freelist = free_sub_pt(root, domain->mode, freelist); + BUG_ON(pgtable.mode < PAGE_MODE_NONE || + pgtable.mode > PAGE_MODE_6_LEVEL); + + root = (unsigned long)pgtable.root; + freelist = free_sub_pt(root, pgtable.mode, freelist); free_page_list(freelist); } @@ -1417,24 +1444,39 @@ static bool increase_address_space(struct protection_domain *domain, unsigned long address, gfp_t gfp) { + struct domain_pgtable pgtable; unsigned long flags; - bool ret = false; - u64 *pte; + bool ret = true; + u64 *pte, root; spin_lock_irqsave(&domain->lock, flags); - if (address <= PM_LEVEL_SIZE(domain->mode) || - WARN_ON_ONCE(domain->mode == 
PAGE_MODE_6_LEVEL)) + amd_iommu_domain_get_pgtable(domain, &pgtable); + + if (address <= PM_LEVEL_SIZE(pgtable.mode)) + goto out; + + ret = false; + if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL)) goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) goto out; - *pte = PM_LEVEL_PDE(domain->mode, - iommu_virt_to_phys(domain->pt_root)); - domain->pt_root = pte; - domain->mode += 1; + *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root)); + + pgtable.root = pte; + pgtable.mode += 1; + update_and_flush_device_table(domain, &pgtable); + domain_flush_complete(domain); + + /* + * Device Table needs to be updated and flushed before the new root can + * be published. + */ + root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode); + atomic64_set(&domain->pt_root, root); ret = true; @@ -1451,16 +1493,29 @@ static u64 *alloc_pte(struct protection_domain *domain, gfp_t gfp, bool *updated) { + struct domain_pgtable pgtable; int level, end_lvl; u64 *pte, *page; BUG_ON(!is_power_of_2(page_size)); - while (address > PM_LEVEL_SIZE(domain->mode)) - *updated = increase_address_space(domain, address, gfp) || *updated; + amd_iommu_domain_get_pgtable(domain, &pgtable); + + while (address > PM_LEVEL_SIZE(pgtable.mode)) { + /* + * Return an error if there is no memory to update the + * page-table. + */ + if (!increase_address_space(domain, address, gfp)) + return NULL; + + /* Read new values to check if update was successful */ + amd_iommu_domain_get_pgtable(domain, &pgtable); + } + - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; + level = pgtable.mode - 1; + pte = &pgtable.root[PM_LEVEL_INDEX(level, address)]; address = PAGE_SIZE_ALIGN(address, page_size); end_lvl = PAGE_SIZE_LEVEL(page_size); @@ -1536,16 +1591,19 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address, unsigned long *page_size) { + struct domain_pgtable pgtable; int level; u64 *pte; *page_size = 0; - if (address > PM_LEVEL_SIZE(domain->mode)) + amd_iommu_domain_get_pgtable(domain, &pgtable); + + if (address > PM_LEVEL_SIZE(pgtable.mode)) return NULL; - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; + level = pgtable.mode - 1; + pte = &pgtable.root[PM_LEVEL_INDEX(level, address)]; *page_size = PTE_LEVEL_PAGE_SIZE(level); while (level > 0) { @@ -1660,7 +1718,13 @@ out: unsigned long flags; spin_lock_irqsave(&dom->lock, flags); - update_domain(dom); + /* + * Flush domain TLB(s) and wait for completion. Any Device-Table + * Updates and flushing already happened in + * increase_address_space(). 
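All of these amd_iommu hunks revolve around one mechanism: the page-table root pointer and the 3-bit paging mode are packed into a single 64-bit word (the root is page-aligned, so its low bits are free) and published with one atomic store only after the device table has been updated and flushed; readers take one atomic snapshot and decode both fields from it. A runnable miniature, using C11 atomics in place of the kernel's atomic64_t:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t pt_root;  /* packed root pointer + mode */

static uint64_t encode(uint64_t *root, int mode)
{
	return (uint64_t)(uintptr_t)root | (uint64_t)(mode & 7);
}

static void decode(uint64_t v, uint64_t **root, int *mode)
{
	*root = (uint64_t *)(uintptr_t)(v & ~0xfffULL);  /* PAGE_MASK */
	*mode = (int)(v & 7);
}

/* Writer: make the new table walkable by hardware, then publish once. */
static void publish(uint64_t *new_root, int new_mode)
{
	/* ...device-table update and flush would happen here... */
	atomic_store(&pt_root, encode(new_root, new_mode));
}

/* Reader: one snapshot, then decode; never re-read field by field. */
static void get_pgtable(uint64_t **root, int *mode)
{
	decode(atomic_load(&pt_root), root, mode);
}

int main(void)
{
	_Alignas(4096) static uint64_t table[512];
	uint64_t *root;
	int mode;

	publish(table, 3);  /* 3 == three-level paging, say */
	get_pgtable(&root, &mode);
	printf("root=%p mode=%d\n", (void *)root, mode);
	return 0;
}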
+ */ + domain_flush_tlb_pde(dom); + domain_flush_complete(dom); spin_unlock_irqrestore(&dom->lock, flags); } @@ -1806,6 +1870,7 @@ static void dma_ops_domain_free(struct protection_domain *domain) static struct protection_domain *dma_ops_domain_alloc(void) { struct protection_domain *domain; + u64 *pt_root, root; domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL); if (!domain) @@ -1814,12 +1879,14 @@ static struct protection_domain *dma_ops_domain_alloc(void) if (protection_domain_init(domain)) goto free_domain; - domain->mode = PAGE_MODE_3_LEVEL; - domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); - domain->flags = PD_DMA_OPS_MASK; - if (!domain->pt_root) + pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!pt_root) goto free_domain; + root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL); + atomic64_set(&domain->pt_root, root); + domain->flags = PD_DMA_OPS_MASK; + if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM) goto free_domain; @@ -1841,16 +1908,17 @@ static bool dma_ops_domain(struct protection_domain *domain) } static void set_dte_entry(u16 devid, struct protection_domain *domain, + struct domain_pgtable *pgtable, bool ats, bool ppr) { u64 pte_root = 0; u64 flags = 0; u32 old_domid; - if (domain->mode != PAGE_MODE_NONE) - pte_root = iommu_virt_to_phys(domain->pt_root); + if (pgtable->mode != PAGE_MODE_NONE) + pte_root = iommu_virt_to_phys(pgtable->root); - pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) + pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV; @@ -1923,6 +1991,7 @@ static void clear_dte_entry(u16 devid) static void do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { + struct domain_pgtable pgtable; struct amd_iommu *iommu; bool ats; @@ -1938,7 +2007,9 @@ static void do_attach(struct iommu_dev_data *dev_data, domain->dev_cnt += 1; /* Update device table */ - set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2); + amd_iommu_domain_get_pgtable(domain, &pgtable); + set_dte_entry(dev_data->devid, domain, &pgtable, + ats, dev_data->iommu_v2); clone_aliases(dev_data->pdev); device_flush_dte(dev_data); @@ -2249,23 +2320,36 @@ static int amd_iommu_domain_get_attr(struct iommu_domain *domain, * *****************************************************************************/ -static void update_device_table(struct protection_domain *domain) +static void update_device_table(struct protection_domain *domain, + struct domain_pgtable *pgtable) { struct iommu_dev_data *dev_data; list_for_each_entry(dev_data, &domain->dev_list, list) { - set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled, - dev_data->iommu_v2); + set_dte_entry(dev_data->devid, domain, pgtable, + dev_data->ats.enabled, dev_data->iommu_v2); clone_aliases(dev_data->pdev); } } +static void update_and_flush_device_table(struct protection_domain *domain, + struct domain_pgtable *pgtable) +{ + update_device_table(domain, pgtable); + domain_flush_devices(domain); +} + static void update_domain(struct protection_domain *domain) { - update_device_table(domain); + struct domain_pgtable pgtable; - domain_flush_devices(domain); + /* Update device table */ + amd_iommu_domain_get_pgtable(domain, &pgtable); + update_and_flush_device_table(domain, &pgtable); + + /* Flush domain TLB(s) and wait for completion */ domain_flush_tlb_pde(domain); + domain_flush_complete(domain); } int __init amd_iommu_init_api(void) @@ -2375,6 +2459,7 @@ out_err: static struct iommu_domain 
*amd_iommu_domain_alloc(unsigned type) { struct protection_domain *pdomain; + u64 *pt_root, root; switch (type) { case IOMMU_DOMAIN_UNMANAGED: if (!pdomain) return NULL; - pdomain->mode = PAGE_MODE_3_LEVEL; - pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); - if (!pdomain->pt_root) { + pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!pt_root) { protection_domain_free(pdomain); return NULL; } + root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL); + atomic64_set(&pdomain->pt_root, root); + pdomain->domain.geometry.aperture_start = 0; pdomain->domain.geometry.aperture_end = ~0ULL; pdomain->domain.geometry.force_aperture = true; @@ -2406,7 +2493,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) if (!pdomain) return NULL; - pdomain->mode = PAGE_MODE_NONE; + atomic64_set(&pdomain->pt_root, PAGE_MODE_NONE); break; default: return NULL; @@ -2418,6 +2505,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) static void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; + struct domain_pgtable pgtable; domain = to_pdomain(dom); @@ -2435,7 +2523,9 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) dma_ops_domain_free(domain); break; default: - if (domain->mode != PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + + if (pgtable.mode != PAGE_MODE_NONE) free_pagetable(domain); if (domain->flags & PD_IOMMUV2_MASK) @@ -2518,10 +2608,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, gfp_t gfp) { struct protection_domain *domain = to_pdomain(dom); + struct domain_pgtable pgtable; int prot = 0; int ret; - if (domain->mode == PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode == PAGE_MODE_NONE) return -EINVAL; if (iommu_prot & IOMMU_READ) @@ -2541,8 +2633,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, struct iommu_iotlb_gather *gather) { struct protection_domain *domain = to_pdomain(dom); + struct domain_pgtable pgtable; - if (domain->mode == PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode == PAGE_MODE_NONE) return 0; return iommu_unmap_page(domain, iova, page_size); @@ -2553,9 +2647,11 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, { struct protection_domain *domain = to_pdomain(dom); unsigned long offset_mask, pte_pgsize; + struct domain_pgtable pgtable; u64 *pte, __pte; - if (domain->mode == PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode == PAGE_MODE_NONE) return iova; pte = fetch_pte(domain, iova, &pte_pgsize); @@ -2708,16 +2804,26 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier); void amd_iommu_domain_direct_map(struct iommu_domain *dom) { struct protection_domain *domain = to_pdomain(dom); + struct domain_pgtable pgtable; unsigned long flags; + u64 pt_root; spin_lock_irqsave(&domain->lock, flags); + /* First save pgtable configuration */ + amd_iommu_domain_get_pgtable(domain, &pgtable); + /* Update data structure */ - domain->mode = PAGE_MODE_NONE; + pt_root = amd_iommu_domain_encode_pgtable(NULL, PAGE_MODE_NONE); + atomic64_set(&domain->pt_root, pt_root); /* Make changes visible to IOMMUs */ update_domain(domain); + /* Restore old pgtable in domain->pt_root to free page-table */ + pt_root = amd_iommu_domain_encode_pgtable(pgtable.root, pgtable.mode); + atomic64_set(&domain->pt_root, pt_root); + /*
Page-table is not visible to IOMMU anymore, so free it */ free_pagetable(domain); @@ -2908,9 +3014,11 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc) static int __set_gcr3(struct protection_domain *domain, int pasid, unsigned long cr3) { + struct domain_pgtable pgtable; u64 *pte; - if (domain->mode != PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode != PAGE_MODE_NONE) return -EINVAL; pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); @@ -2924,9 +3032,11 @@ static int __set_gcr3(struct protection_domain *domain, int pasid, static int __clear_gcr3(struct protection_domain *domain, int pasid) { + struct domain_pgtable pgtable; u64 *pte; - if (domain->mode != PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode != PAGE_MODE_NONE) return -EINVAL; pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index ca8c4522045b..7a8fdec138bd 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -468,8 +468,7 @@ struct protection_domain { iommu core code */ spinlock_t lock; /* mostly used to lock the page table*/ u16 id; /* the domain id written to the device table */ - int mode; /* paging mode (0-6 levels) */ - u64 *pt_root; /* page table root pointer */ + atomic64_t pt_root; /* pgtable root and pgtable mode */ int glx; /* Number of levels for GCR3 table */ u64 *gcr3_tbl; /* Guest CR3 table */ unsigned long flags; /* flags to find out type of domain */ @@ -477,6 +476,12 @@ struct protection_domain { unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ }; +/* For decoded pt_root */ +struct domain_pgtable { + int mode; + u64 *root; +}; + /* * Structure where we save information about one hardware AMD IOMMU in the * system. diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index d5cac4f46ca5..4e1d11af23c8 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -453,7 +453,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, if (!region) return -ENOMEM; - list_add(&vdev->resv_regions, &region->list); + list_add(&region->list, &vdev->resv_regions); return 0; } diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 668418d7ea77..f620442addf5 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1465,6 +1465,13 @@ static const struct mei_cfg mei_me_pch12_cfg = { MEI_CFG_DMA_128, }; +/* LBG with quirk for SPS Firmware exclusion */ +static const struct mei_cfg mei_me_pch12_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS, +}; + /* Tiger Lake and newer devices */ static const struct mei_cfg mei_me_pch15_cfg = { MEI_CFG_PCH8_HFS, @@ -1487,6 +1494,7 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, + [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg, [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg, }; diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 4a8d4dcd5a91..b6b94e211464 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -80,6 +80,9 @@ struct mei_me_hw { * servers platforms with quirk for * SPS firmware exclusion.
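The virtio-iommu one-liner above is a swapped-argument bug: list_add(new, head) threads the new entry onto the head, so reversing the arguments splices the list head into the new node and corrupts the list. A standalone reminder of the argument order, with a minimal doubly linked list in place of <linux/list.h>:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Same contract as the kernel's list_add(): insert 'entry' right after 'head'. */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

struct region {
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head resv_regions = LIST_HEAD_INIT(resv_regions);
	struct region r = { .id = 1 };

	/* New entry first, list head second; swapping them corrupts the list. */
	list_add(&r.list, &resv_regions);
	printf("linked: %s\n", resv_regions.next == &r.list ? "yes" : "no");
	return 0;
}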
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer + * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer + * servers platforms with quirk for + * SPS firmware exclusion. * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer * @MEI_ME_NUM_CFG: Upper Sentinel. */ @@ -93,6 +96,7 @@ enum mei_cfg_idx { MEI_ME_PCH8_CFG, MEI_ME_PCH8_SPS_CFG, MEI_ME_PCH12_CFG, + MEI_ME_PCH12_SPS_CFG, MEI_ME_PCH15_CFG, MEI_ME_NUM_CFG, }; diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 0c390fe421ad..a1ed375fed37 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -70,7 +70,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8499b56a15a8..c5367e2c8487 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1370,6 +1370,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) struct mmc_request *mrq = &mqrq->brq.mrq; struct request_queue *q = req->q; struct mmc_host *host = mq->card->host; + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); unsigned long flags; bool put_card; int err; @@ -1399,7 +1400,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) spin_lock_irqsave(&mq->lock, flags); - mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1; put_card = (mmc_tot_in_flight(mq) == 0); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 25bee3daf9e2..4b1eb89b401d 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req) case MMC_ISSUE_DCMD: if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) { if (recovery_needed) - __mmc_cqe_recovery_notifier(mq); + mmc_cqe_recovery_notifier(mrq); return BLK_EH_RESET_TIMER; } - /* No timeout (XXX: huh? comment doesn't make much sense) */ - blk_mq_complete_request(req); + /* The request has gone already */ return BLK_EH_DONE; default: /* Timeout is handled by mmc core */ @@ -127,18 +126,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req, struct mmc_card *card = mq->card; struct mmc_host *host = card->host; unsigned long flags; - int ret; + bool ignore_tout; spin_lock_irqsave(&mq->lock, flags); - - if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled) - ret = BLK_EH_RESET_TIMER; - else - ret = mmc_cqe_timed_out(req); - + ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled; spin_unlock_irqrestore(&mq->lock, flags); - return ret; + return ignore_tout ? 
BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req); } static void mmc_mq_recovery_handler(struct work_struct *work) diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index 1aee485d56d4..026ca9194ce5 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to get irq for data line\n"); - return ret; + goto free_host; } mutex_init(&host->cmd_mutex); @@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, host); mmc_add_host(mmc); return 0; + +free_host: + mmc_free_host(mmc); + return ret; } static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index faba53cf139b..d8b76cb8698a 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -605,10 +605,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, } static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = { - .chip = &sdhci_acpi_chip_amd, - .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, - .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | - SDHCI_QUIRK_32BIT_ADMA_SIZE, + .chip = &sdhci_acpi_chip_amd, + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE, + .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA, .probe_slot = sdhci_acpi_emmc_amd_probe_slot, }; diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index ce15a05f23d4..fd76aa672e02 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -26,6 +26,9 @@ #define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26) #define GLI_9750_DRIVING_1_VALUE 0xFFF #define GLI_9750_DRIVING_2_VALUE 0x3 +#define SDHCI_GLI_9750_SEL_1 BIT(29) +#define SDHCI_GLI_9750_SEL_2 BIT(31) +#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30)) #define SDHCI_GLI_9750_PLL 0x864 #define SDHCI_GLI_9750_PLL_TX2_INV BIT(23) @@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host) GLI_9750_DRIVING_1_VALUE); driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2, GLI_9750_DRIVING_2_VALUE); + driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST); + driving_value |= SDHCI_GLI_9750_SEL_2; sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING); sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4; @@ -334,6 +339,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg) return value; } +#ifdef CONFIG_PM_SLEEP +static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + + pci_free_irq_vectors(slot->chip->pdev); + gli_pcie_enable_msi(slot); + + return sdhci_pci_resume_host(chip); +} +#endif + static const struct sdhci_ops sdhci_gl9755_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, @@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = { .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50, .probe_slot = gli_probe_slot_gl9755, .ops = &sdhci_gl9755_ops, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_gli_resume, +#endif }; static const struct sdhci_ops sdhci_gl9750_ops = { @@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = { .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50, .probe_slot = gli_probe_slot_gl9750, .ops = &sdhci_gl9750_ops, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_gli_resume, 
+#endif }; diff --git a/drivers/most/core.c b/drivers/most/core.c index 06426fc5c990..f781c46cd4af 100644 --- a/drivers/most/core.c +++ b/drivers/most/core.c @@ -1483,7 +1483,7 @@ static void __exit most_exit(void) ida_destroy(&mdev_id); } -module_init(most_init); +subsys_initcall(most_init); module_exit(most_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>"); diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index cc0703c3d57f..efd1a1d1f35e 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -136,25 +136,21 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) oiph = skb_network_header(skb); skb_reset_network_header(skb); - if (family == AF_INET) + if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); -#if IS_ENABLED(CONFIG_IPV6) else err = IP6_ECN_decapsulate(oiph, skb); -#endif if (unlikely(err)) { if (log_ecn_error) { - if (family == AF_INET) + if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, ((struct iphdr *)oiph)->tos); -#if IS_ENABLED(CONFIG_IPV6) else net_info_ratelimited("non-ECT from %pI6\n", &((struct ipv6hdr *)oiph)->saddr); -#endif } if (err > 1) { ++bareudp->dev->stats.rx_frame_errors; @@ -350,7 +346,6 @@ free_dst: return err; } -#if IS_ENABLED(CONFIG_IPV6) static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct bareudp_dev *bareudp, const struct ip_tunnel_info *info) @@ -411,7 +406,6 @@ free_dst: dst_release(dst); return err; } -#endif static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -435,11 +429,9 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) } rcu_read_lock(); -#if IS_ENABLED(CONFIG_IPV6) - if (info->mode & IP_TUNNEL_INFO_IPV6) + if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6) err = bareudp6_xmit_skb(skb, dev, bareudp, info); else -#endif err = bareudp_xmit_skb(skb, dev, bareudp, info); rcu_read_unlock(); @@ -467,7 +459,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, use_cache = ip_tunnel_dst_cache_usable(skb, info); - if (ip_tunnel_info_af(info) == AF_INET) { + if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; @@ -478,7 +470,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, ip_rt_put(rt); info->key.u.ipv4.src = saddr; -#if IS_ENABLED(CONFIG_IPV6) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct in6_addr saddr; @@ -492,7 +483,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, dst_release(dst); info->key.u.ipv6.src = saddr; -#endif } else { return -EINVAL; } diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index fdcb70b9f0e4..400207c5c7de 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -360,6 +360,7 @@ static void __exit dsa_loop_exit(void) } module_exit(dsa_loop_exit); +MODULE_SOFTDEP("pre: dsa_loop_bdinfo"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Fainelli"); MODULE_DESCRIPTION("DSA loopback driver"); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 53055ce5dfd6..2a69c0d06f3c 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -69,6 +69,7 @@ config BCMGENET select BCM7XXX_PHY select MDIO_BCM_UNIMAC select DIMLIB + select BROADCOM_PHY if ARCH_BCM2835 help This driver supports 
the built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset. diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 2bd7ace0a953..bfc6bfe94d0a 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -77,6 +77,7 @@ config UCC_GETH depends on QUICC_ENGINE && PPC32 select FSL_PQ_MDIO select PHYLIB + select FIXED_PHY ---help--- This driver supports the Gigabit Ethernet mode of the QUICC Engine, which is available on some Freescale SOCs. @@ -90,6 +91,7 @@ config GIANFAR depends on HAS_DMA select FSL_PQ_MDIO select PHYLIB + select FIXED_PHY select CRC32 ---help--- This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig index 3b325733a4f8..0a54c7e0e4ae 100644 --- a/drivers/net/ethernet/freescale/dpaa/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig @@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH tristate "DPAA Ethernet" depends on FSL_DPAA && FSL_FMAN select PHYLIB + select FIXED_PHY select FSL_FMAN_MAC ---help--- Data Path Acceleration Architecture Ethernet driver, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 8c8d95aa1dfd..5fbaa51b38bc 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { addr = dpaa2_sg_get_addr(&sgt[i]); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); free_pages((unsigned long)sg_vaddr, 0); @@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* Get the address and length from the S/G entry */ sg_addr = dpaa2_sg_get_addr(sge); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); - dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, sg_addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); sg_length = dpaa2_sg_get_len(sge); @@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, (page_address(page) - page_address(head_page)); skb_add_rx_frag(skb, i - 1, head_page, page_offset, - sg_length, DPAA2_ETH_RX_BUF_SIZE); + sg_length, priv->rx_buf_size); } if (dpaa2_sg_is_final(sge)) @@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, DMA_BIDIRECTIONAL); free_pages((unsigned long)vaddr, 0); } @@ -367,7 +367,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, break; case XDP_REDIRECT: dma_unmap_page(priv->net_dev->dev.parent, addr, - DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); + priv->rx_buf_size, DMA_BIDIRECTIONAL); ch->buf_count--; /* Allow redirect use of full headroom */ @@ -410,7 +410,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, trace_dpaa2_rx_fd(priv->net_dev, fd); vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); fas = dpaa2_get_fas(vaddr, false); @@ -429,13 +429,13 @@ static void dpaa2_eth_rx(struct 
dpaa2_eth_priv *priv, return; } - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); skb = build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); skb = build_frag_skb(priv, ch, buf_data); free_pages((unsigned long)vaddr, 0); @@ -1011,7 +1011,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, if (!page) goto err_alloc; - addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, + addr = dma_map_page(dev, page, 0, priv->rx_buf_size, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, addr))) goto err_map; @@ -1021,7 +1021,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* tracing point */ trace_dpaa2_eth_buf_seed(priv->net_dev, page, DPAA2_ETH_RX_BUF_RAW_SIZE, - addr, DPAA2_ETH_RX_BUF_SIZE, + addr, priv->rx_buf_size, bpid); } @@ -1757,7 +1757,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) int mfl, linear_mfl; mfl = DPAA2_ETH_L2_MAX_FRM(mtu); - linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE - + linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; if (mfl > linear_mfl) { @@ -2492,6 +2492,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) else rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + /* We need to ensure that the buffer size seen by WRIOP is a multiple + * of 64 or 256 bytes depending on the WRIOP version. + */ + priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); + /* tx buffer */ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; buf_layout.pass_timestamp = true; @@ -3177,7 +3182,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) pools_params.num_dpbp = 1; pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; pools_params.pools[0].backup_pool = 0; - pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; + pools_params.pools[0].buffer_size = priv->rx_buf_size; err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); if (err) { dev_err(dev, "dpni_set_pools() failed\n"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index b5f7dbbc2a02..42f0a7a80afa 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -393,6 +393,7 @@ struct dpaa2_eth_priv { u16 tx_data_offset; struct fsl_mc_device *dpbp_dev; + u16 rx_buf_size; u16 bpid; struct iommu_domain *iommu_domain; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index bd13ee48d623..049afd1d6252 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -635,7 +635,7 @@ static int num_rules(struct dpaa2_eth_priv *priv) static int update_cls_rule(struct net_device *net_dev, struct ethtool_rx_flow_spec *new_fs, - int location) + unsigned int location) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpaa2_eth_cls_rule *rule; diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 3892a2062404..2fff43509098 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -64,7 +64,7 @@ config HNS_MDIO the PHY config HNS - tristate "Hisilicon Network Subsystem Support (Framework)" + tristate ---help--- This selects the 
framework support for Hisilicon Network Subsystem. It is needed by any driver which provides HNS acceleration engine or make diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index eef855f11a01..c33eb1147055 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -45,6 +45,8 @@ #define MGMT_MSG_TIMEOUT 5000 +#define SET_FUNC_PORT_MGMT_TIMEOUT 25000 + #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) @@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, u8 *buf_in, u16 in_size, u8 *buf_out, u16 *out_size, enum mgmt_direction_type direction, - u16 resp_msg_id) + u16 resp_msg_id, u32 timeout) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_recv_msg *recv_msg; struct completion *recv_done; + unsigned long timeo; u16 msg_id; int err; @@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, goto unlock_sync_msg; } - if (!wait_for_completion_timeout(recv_done, - msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { + timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); + + if (!wait_for_completion_timeout(recv_done, timeo)) { dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); err = -ETIMEDOUT; goto unlock_sync_msg; @@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; + u32 timeout = 0; if (sync != HINIC_MGMT_MSG_SYNC) { dev_err(&pdev->dev, "Invalid MGMT msg type\n"); @@ -353,13 +358,16 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, return -EINVAL; } + if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) + timeout = SET_FUNC_PORT_MGMT_TIMEOUT; + if (HINIC_IS_VF(hwif)) return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in, in_size, buf_out, out_size, 0); else return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, buf_out, out_size, MGMT_DIRECT_SEND, - MSG_NOT_RESP); + MSG_NOT_RESP, timeout); } /** diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index e3ff119fe341..c8ab129a7ae8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -488,7 +488,6 @@ int hinic_close(struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); unsigned int flags; - int err; down(&nic_dev->mgmt_lock); @@ -505,20 +504,9 @@ int hinic_close(struct net_device *netdev) if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0); - err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set func port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); - return err; - } + hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); - err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); - return err; - } + hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); if (nic_dev->flags & HINIC_RSS_ENABLE) { hinic_rss_deinit(nic_dev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 187c633a7af5..f4227517dc8e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -497,13 +497,17 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); - if (!hw->irq_name) + if (!hw->irq_name) { + err = -ENOMEM; goto err_free_netdev; + } hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, sizeof(cpumask_var_t), GFP_KERNEL); - if (!hw->affinity_mask) + if (!hw->affinity_mask) { + err = -ENOMEM; goto err_free_netdev; + } err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); if (err < 0) { diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index fccc4805247f..2c0dcd7acf3f 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1062,7 +1062,7 @@ static int encx24j600_spi_probe(struct spi_device *spi) if (unlikely(ret)) { netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n", ret); - goto out_free; + goto out_stop; } eidled = encx24j600_read_reg(priv, EIDLED); @@ -1080,6 +1080,8 @@ static int encx24j600_spi_probe(struct spi_device *spi) out_unregister: unregister_netdev(priv->ndev); +out_stop: + kthread_stop(priv->kworker_task); out_free: free_netdev(ndev); @@ -1092,6 +1094,7 @@ static int encx24j600_spi_remove(struct spi_device *spi) struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev); unregister_netdev(priv->ndev); + kthread_stop(priv->kworker_task); free_netdev(priv->ndev); diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 354efffac0f9..bdbf0726145e 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -333,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) goto err_free_alink; alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL); - if (!alink->prio_map) + if (!alink->prio_map) { + err = -ENOMEM; goto err_free_alink; + } /* This is a multi-host app, make sure MAC/PHY is up, but don't * make the MAC/PHY state follow the state of any of the ports. diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 80b4d8332109..7321a92f8395 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2175,6 +2175,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) dev_info(ionic->dev, "FW Up: restarting LIFs\n"); ionic_init_devinfo(ionic); + ionic_port_init(ionic); err = ionic_qcqs_alloc(lif); if (err) goto err_out; @@ -2406,7 +2407,17 @@ static int ionic_station_set(struct ionic_lif *lif) if (is_zero_ether_addr(ctx.comp.lif_getattr.mac)) return 0; - if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) { + if (!is_zero_ether_addr(netdev->dev_addr)) { + /* If the netdev mac is non-zero and doesn't match the default + * device address, it was set by something earlier and we're + * likely here again after a fw-upgrade reset. We need to be + * sure the netdev mac is in our filter list. 
+ */ + if (!ether_addr_equal(ctx.comp.lif_getattr.mac, + netdev->dev_addr)) + ionic_lif_addr(lif, netdev->dev_addr, true); + } else { + /* Update the netdev mac with the device's mac */ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); addr.sa_family = AF_INET; err = eth_prepare_mac_addr_change(netdev, &addr); @@ -2416,12 +2427,6 @@ static int ionic_station_set(struct ionic_lif *lif) return 0; } - if (!is_zero_ether_addr(netdev->dev_addr)) { - netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n", - netdev->dev_addr); - ionic_lif_addr(lif, netdev->dev_addr, false); - } - eth_commit_mac_addr_change(netdev, &addr); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 92110abcff96..df5b9bcc3aba 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -512,16 +512,16 @@ int ionic_port_init(struct ionic *ionic) size_t sz; int err; - if (idev->port_info) - return 0; - - idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE); - idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz, - &idev->port_info_pa, - GFP_KERNEL); if (!idev->port_info) { - dev_err(ionic->dev, "Failed to allocate port info, aborting\n"); - return -ENOMEM; + idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE); + idev->port_info = dma_alloc_coherent(ionic->dev, + idev->port_info_sz, + &idev->port_info_pa, + GFP_KERNEL); + if (!idev->port_info) { + dev_err(ionic->dev, "Failed to allocate port info\n"); + return -ENOMEM; + } } sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data)); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d926583b407f..97a7e27fff16 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2048,6 +2048,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + /* RTL8401, reportedly works if treated as RTL8101e */ + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 }, { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e0a5fe83d8e0..bfc4a92f1d92 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -75,6 +75,11 @@ struct ethqos_emac_por { unsigned int value; }; +struct ethqos_emac_driver_data { + const struct ethqos_emac_por *por; + unsigned int num_por; +}; + struct qcom_ethqos { struct platform_device *pdev; void __iomem *rgmii_base; @@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = { { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 }, }; +static const struct ethqos_emac_driver_data emac_v2_3_0_data = { + .por = emac_v2_3_0_por, + .num_por = ARRAY_SIZE(emac_v2_3_0_por), +}; + static int ethqos_dll_configure(struct qcom_ethqos *ethqos) { unsigned int val; @@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; + const struct ethqos_emac_driver_data *data; struct qcom_ethqos *ethqos; struct resource *res; int ret; @@ -471,7 +482,9 @@ static 
int qcom_ethqos_probe(struct platform_device *pdev) goto err_mem; } - ethqos->por = of_device_get_match_data(&pdev->dev); + data = of_device_get_match_data(&pdev->dev); + ethqos->por = data->por; + ethqos->num_por = data->num_por; ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii"); if (IS_ERR(ethqos->rgmii_clk)) { @@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev) } static const struct of_device_id qcom_ethqos_match[] = { - { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por}, + { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data}, { } }; MODULE_DEVICE_TABLE(of, qcom_ethqos_match); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 4d4852f00ff7..182d10f171f6 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -49,6 +49,7 @@ config TI_CPSW_PHY_SEL config TI_CPSW tristate "TI CPSW Switch Support" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST + depends on TI_CPTS || !TI_CPTS select TI_DAVINCI_MDIO select MFD_SYSCON select PAGE_POOL @@ -64,6 +65,7 @@ config TI_CPSW_SWITCHDEV tristate "TI CPSW Switch Support with switchdev" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST depends on NET_SWITCHDEV + depends on TI_CPTS || !TI_CPTS select PAGE_POOL select TI_DAVINCI_MDIO select MFD_SYSCON @@ -77,23 +79,16 @@ config TI_CPSW_SWITCHDEV will be called cpsw_new. config TI_CPTS - bool "TI Common Platform Time Sync (CPTS) Support" - depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST + tristate "TI Common Platform Time Sync (CPTS) Support" + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST depends on COMMON_CLK - depends on POSIX_TIMERS + depends on PTP_1588_CLOCK ---help--- This driver supports the Common Platform Time Sync unit of the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem. The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the driver offers a PTP Hardware Clock. -config TI_CPTS_MOD - tristate - depends on TI_CPTS - depends on PTP_1588_CLOCK - default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y - default m - config TI_K3_AM65_CPSW_NUSS tristate "TI K3 AM654x/J721E CPSW Ethernet driver" depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER @@ -137,6 +132,7 @@ config TI_KEYSTONE_NETCP select TI_DAVINCI_MDIO depends on OF depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS + depends on TI_CPTS || !TI_CPTS ---help--- This driver supports TI's Keystone NETCP Core. 
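The "depends on TI_CPTS || !TI_CPTS" lines added above use a standard Kconfig idiom for optional tristate dependencies: with tristate values ordered n < m < y, the expression only bites when TI_CPTS=m, capping the dependent symbol at m so a built-in (=y) network driver can never link against a modular cpts, while TI_CPTS=n leaves the drivers fully selectable with time-sync support compiled out. A minimal sketch of the idiom with hypothetical symbols CORE and DRIVER, not taken from this diff:

config CORE
	tristate "Optional core library"

config DRIVER
	tristate "Driver that can optionally use CORE"
	# "CORE || !CORE" evaluates to m only when CORE=m, capping DRIVER
	# at m in that case; CORE=n or CORE=y leaves DRIVER unrestricted
	depends on CORE || !CORE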
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index be95512d80b5..6e779292545d 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o -obj-$(CONFIG_TI_CPTS_MOD) += cpts.o +obj-$(CONFIG_TI_CPTS) += cpts.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 206688154fdf..60dcaf2a04a9 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -146,7 +146,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) { struct bpqdev *bpq; - list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) { + list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list, + lockdep_rtnl_is_held()) { if (bpq->ethdev == dev) return bpq->axdev; } diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 2fd21d75367d..bdbfeed359db 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -399,13 +399,14 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, /* assert(which < trans->tre_count); */ /* Set the page information for the buffer. We also need to fill in - * the DMA address for the buffer (something dma_map_sg() normally - * does). + * the DMA address and length for the buffer (something dma_map_sg() + * normally does). */ sg = &trans->sgl[which]; sg_set_buf(sg, buf, size); sg_dma_address(sg) = addr; + sg_dma_len(sg) = sg->length; info = &trans->info[which]; info->opcode = opcode; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 394f8a6df086..c9ab865e7290 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -569,23 +569,15 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size) void ipa_cmd_tag_process_add(struct gsi_trans *trans) { - ipa_cmd_register_write_add(trans, 0, 0, 0, true); -#if 1 - /* Reference these functions to avoid a compile error */ - (void)ipa_cmd_ip_packet_init_add; - (void)ipa_cmd_ip_tag_status_add; - (void) ipa_cmd_transfer_add; -#else struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); - struct gsi_endpoint *endpoint; + struct ipa_endpoint *endpoint; endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; - ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id); + ipa_cmd_register_write_add(trans, 0, 0, 0, true); + ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id); ipa_cmd_ip_tag_status_add(trans, 0xcba987654321); - ipa_cmd_transfer_add(trans, 4); -#endif } /* Returns the number of commands required for the tag process */ diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index 4d33aa7ebfbb..a5f7a79a1923 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -53,7 +53,7 @@ * @clock_on: Whether IPA clock is on * @notified: Whether modem has been notified of clock state * @disabled: Whether setup ready interrupt handling is disabled - * @mutex mutex: Motex protecting ready interrupt/shutdown interlock + * @mutex: Mutex protecting ready-interrupt/shutdown interlock * @panic_notifier: Panic notifier structure */ struct ipa_smp2p { diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 
9bdc924eea83..d4bbf79dab6c 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1240,9 +1240,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) /* Restart autonegotiation so the new modes get sent to the * link partner. */ - ret = phy_restart_aneg(phydev); - if (ret < 0) - return ret; + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = phy_restart_aneg(phydev); + if (ret < 0) + return ret; + } } return 0; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index d760a36db28c..beedaad08255 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -490,6 +490,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb) goto out; + if (skb->pkt_type != PACKET_HOST) + goto abort; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 417e42c9fd03..bb8c34d746ab 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2659,7 +2659,7 @@ static struct hso_device *hso_create_bulk_serial_device( if (! (serial->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) { - dev_err(&interface->dev, "Failed to find BULK IN ep\n"); + dev_err(&interface->dev, "Failed to find BULK OUT ep\n"); goto exit2; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 9e1b5d748586..b6951aa76295 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1252,9 +1252,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, break; } while (rq->vq->num_free); if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { - u64_stats_update_begin(&rq->stats.syncp); + unsigned long flags; + + flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); rq->stats.kicks++; - u64_stats_update_end(&rq->stats.syncp); + u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); } return !oom; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f2adea96b04c..f3c037f5a9ba 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1110,7 +1110,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, * Don't treat an error as fatal, as we potentially already * have a NGUID or EUI-64. */ - if (status > 0) + if (status > 0 && !(status & NVME_SC_DNR)) status = 0; goto free_data; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4e79e412b276..e13c370de830 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -973,9 +973,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) { - if (++nvmeq->cq_head == nvmeq->q_depth) { + u16 tmp = nvmeq->cq_head + 1; + + if (tmp == nvmeq->q_depth) { nvmeq->cq_head = 0; nvmeq->cq_phase ^= 1; + } else { + nvmeq->cq_head = tmp; } } diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index 3708d43b7508..393011a05b48 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -816,6 +816,13 @@ static const struct of_device_id qusb2_phy_of_match_table[] = { .compatible = "qcom,msm8998-qusb2-phy", .data = &msm8998_phy_cfg, }, { + /* + * Deprecated. 
Only here to support legacy device + * trees that didn't include "qcom,qusb2-v2-phy" + */ + .compatible = "qcom,sdm845-qusb2-phy", + .data = &qusb2_v2_phy_cfg, + }, { .compatible = "qcom,qusb2-v2-phy", .data = &qusb2_v2_phy_cfg, }, diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c index d998e65c89c8..a52a9bf13b75 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c @@ -160,18 +160,11 @@ static int qcom_snps_hsphy_power_on(struct phy *phy) ret = regulator_bulk_enable(VREG_NUM, priv->vregs); if (ret) return ret; - ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks); - if (ret) - goto err_disable_regulator; + qcom_snps_hsphy_disable_hv_interrupts(priv); qcom_snps_hsphy_exit_retention(priv); return 0; - -err_disable_regulator: - regulator_bulk_disable(VREG_NUM, priv->vregs); - - return ret; } static int qcom_snps_hsphy_power_off(struct phy *phy) @@ -180,7 +173,6 @@ static int qcom_snps_hsphy_power_off(struct phy *phy) qcom_snps_hsphy_enter_retention(priv); qcom_snps_hsphy_enable_hv_interrupts(priv); - clk_bulk_disable_unprepare(priv->num_clks, priv->clks); regulator_bulk_disable(VREG_NUM, priv->vregs); return 0; @@ -266,21 +258,39 @@ static int qcom_snps_hsphy_init(struct phy *phy) struct hsphy_priv *priv = phy_get_drvdata(phy); int ret; - ret = qcom_snps_hsphy_reset(priv); + ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks); if (ret) return ret; + ret = qcom_snps_hsphy_reset(priv); + if (ret) + goto disable_clocks; + qcom_snps_hsphy_init_sequence(priv); ret = qcom_snps_hsphy_por_reset(priv); if (ret) - return ret; + goto disable_clocks; + + return 0; + +disable_clocks: + clk_bulk_disable_unprepare(priv->num_clks, priv->clks); + return ret; +} + +static int qcom_snps_hsphy_exit(struct phy *phy) +{ + struct hsphy_priv *priv = phy_get_drvdata(phy); + + clk_bulk_disable_unprepare(priv->num_clks, priv->clks); return 0; } static const struct phy_ops qcom_snps_hsphy_ops = { .init = qcom_snps_hsphy_init, + .exit = qcom_snps_hsphy_exit, .power_on = qcom_snps_hsphy_power_on, .power_off = qcom_snps_hsphy_power_off, .set_mode = qcom_snps_hsphy_set_mode, diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index c340505150b6..7486f6e4e613 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5754,10 +5754,6 @@ static DECLARE_DELAYED_WORK(regulator_init_complete_work, static int __init regulator_init_complete(void) { - int delay = driver_deferred_probe_timeout; - - if (delay < 0) - delay = 0; /* * Since DT doesn't provide an idiomatic mechanism for * enabling full constraints and since it's much more natural @@ -5768,17 +5764,18 @@ static int __init regulator_init_complete(void) has_full_constraints = true; /* - * If driver_deferred_probe_timeout is set, we punt - * completion for that many seconds since systems like - * distros will load many drivers from userspace so consumers - * might not always be ready yet, this is particularly an - * issue with laptops where this might bounce the display off - * then on. Ideally we'd get a notification from userspace - * when this happens but we don't so just wait a bit and hope - * we waited long enough. It'd be better if we'd only do - * this on systems that need it. 
+ * We punt completion for an arbitrary amount of time since + * systems like distros will load many drivers from userspace + * so consumers might not always be ready yet, this is + * particularly an issue with laptops where this might bounce + * the display off then on. Ideally we'd get a notification + * from userspace when this happens but we don't so just wait + * a bit and hope we waited long enough. It'd be better if + * we'd only do this on systems that need it, and a kernel + * command line option might be useful. */ - schedule_delayed_work(&regulator_init_complete_work, delay * HZ); + schedule_delayed_work(&regulator_init_complete_work, + msecs_to_jiffies(30000)); return 0; } diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index c75112ee7b97..c7fade836d83 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, ISM_NR_DMBS); - if (!ism->smcd) + if (!ism->smcd) { + ret = -ENOMEM; goto err_resource; + } ism->smcd->priv = ism; ret = ism_dev_init(ism); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 7da9e060b270..635f6f9cffc4 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -3640,6 +3640,11 @@ static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt) struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; + if (!vhost->logged_in) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + return; + } + if (vhost->discovery_threads >= disc_threads) return; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 7f66a7783209..59f0f1030c54 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -2320,16 +2320,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); - unsigned long flags; srp_remove_host(hostdata->host); scsi_remove_host(hostdata->host); purge_requests(hostdata, DID_ERROR); - - spin_lock_irqsave(hostdata->host->host_lock, flags); release_event_pool(&hostdata->pool, hostdata); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 97cabd7e0014..33255968f03a 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -3031,11 +3031,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) msleep(1000); - qla_nvme_delete(vha); qla24xx_disable_vp(vha); qla2x00_wait_for_sess_deletion(vha); + qla_nvme_delete(vha); vha->flags.delete_progress = 1; qlt_remove_target(ha, vha); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 4ed90437e8c4..d6c991bd1bde 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3153,7 +3153,7 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, "Entered %s.\n", __func__); - if (vha->flags.qpairs_available && sp->qpair) + if (sp->qpair) req = sp->qpair->req; else return QLA_FUNCTION_FAILED; diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c index 8e0575fcb4c8..67325fbaf760 ---
a/drivers/staging/gasket/gasket_core.c +++ b/drivers/staging/gasket/gasket_core.c @@ -925,6 +925,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma, gasket_get_bar_index(gasket_dev, (vma->vm_pgoff << PAGE_SHIFT) + driver_desc->legacy_mmap_address_offset); + + if (bar_index < 0) + return DO_MAP_REGION_INVALID; + phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset; while (mapped_bytes < map_length) { /* diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO index 87a6dac4890d..ab6f39175d99 100644 --- a/drivers/staging/ks7010/TODO +++ b/drivers/staging/ks7010/TODO @@ -30,5 +30,4 @@ Now the TODOs: Please send any patches to: Greg Kroah-Hartman <gregkh@linuxfoundation.org> -Wolfram Sang <wsa@the-dreams.de> Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org> diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 3d084cec136f..50c7534ba31e 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -182,6 +182,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) return ret; ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; + if (val & ROUTER_CS_26_ONS) return -EOPNOTSUPP; diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index ed0aa5c0d9b7..5674da2b76f0 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -843,10 +843,8 @@ static int bcm_uart_probe(struct platform_device *pdev) if (IS_ERR(clk) && pdev->dev.of_node) clk = of_clk_get(pdev->dev.of_node, 0); - if (IS_ERR(clk)) { - clk_put(clk); + if (IS_ERR(clk)) return -ENODEV; - } port->iotype = UPIO_MEM; port->irq = res_irq->start; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index ac137b6a1dc1..35e9e8faf8de 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1459,6 +1459,7 @@ static int cdns_uart_probe(struct platform_device *pdev) cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE cdns_uart_uart_driver.cons = &cdns_uart_console; + cdns_uart_console.index = id; #endif rc = uart_register_driver(&cdns_uart_uart_driver); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index e5ffed795e4c..48a8199f7845 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) return uniscr; } +static void vc_uniscr_free(struct uni_screen *uniscr) +{ + vfree(uniscr); +} + static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) { - vfree(vc->vc_uni_screen); + vc_uniscr_free(vc->vc_uni_screen); vc->vc_uni_screen = new_uniscr; } @@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, err = resize_screen(vc, new_cols, new_rows, user); if (err) { kfree(newscreen); - kfree(new_uniscr); + vc_uniscr_free(new_uniscr); return err; } diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index af648ba6544d..46105457e1ca 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0); - if (!IS_ERR(ci->platdata->vbus_extcon.edev)) { + if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) { hw_write_id_reg(ci, 
HS_PHY_GENCONFIG_2, HS_PHY_SESS_VLD_CTRL_EN, HS_PHY_SESS_VLD_CTRL_EN); diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6833c918abce..b9db9812d6c5 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) { struct usb_memory *usbm = NULL; struct usb_dev_state *ps = file->private_data; + struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; @@ -250,9 +251,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); - if (remap_pfn_range(vma, vma->vm_start, - virt_to_phys(usbm->mem) >> PAGE_SHIFT, - size, vma->vm_page_prot) < 0) { + if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); return -EAGAIN; } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index a48678a0c83a..6197938dcc2d 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1144,11 +1144,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index ffd984142171..d63072fee099 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, send it directly to the tty port */ if (garmin_data_p->flags & FLAGS_QUEUING) { pkt_add(garmin_data_p, data, data_length); - } else if (bulk_data || - getLayerId(data) == GARMIN_LAYERID_APPL) { + } else if (bulk_data || (data_length >= sizeof(u32) && + getLayerId(data) == GARMIN_LAYERID_APPL)) { spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->flags |= APP_RESP_SEEN; diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 613f91add03d..ce0401d3137f 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 1b23741036ee..37157ed9a881 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -28,6 +28,13 @@ * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> */ +/* Reported-by: Julian Groß <julian.g@posteo.de> */ +UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + "LaCie", + "2Big Quadra USB3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
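The usbdev_mmap() change above swaps a hand-rolled remap_pfn_range(virt_to_phys(...)) mapping for dma_mmap_coherent(). Memory obtained from dma_alloc_coherent() has no guaranteed virt_to_phys() translation on every architecture, so user mappings of such buffers must go through the DMA API helper. A minimal sketch of the pattern in a character-device mmap handler; struct demo_dev and its fields are hypothetical, while dma_mmap_coherent() and its signature are the real kernel API:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct demo_dev {
	struct device *dma_dev;		/* device the buffer belongs to */
	void *buf_cpu;			/* from dma_alloc_coherent() */
	dma_addr_t buf_dma;		/* device address of that buffer */
	size_t buf_size;
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_dev *dd = file->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	if (size > dd->buf_size)
		return -EINVAL;

	/* Builds the user mapping correctly on all architectures,
	 * including those where virt_to_phys() on a coherent buffer
	 * would return garbage.
	 */
	return dma_mmap_coherent(dd->dma_dev, vma, dd->buf_cpu,
				 dd->buf_dma, size);
}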
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index f5c5e0aef66f..67c5139cfa0d 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -157,6 +157,10 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) req.mode_data |= (state->mode - TYPEC_STATE_MODAL) << PMC_USB_ALTMODE_DP_MODE_SHIFT; + if (data->status & DP_STATUS_HPD_STATE) + req.mode_data |= PMC_USB_DP_HPD_LVL << + PMC_USB_ALTMODE_DP_MODE_SHIFT; + return pmc_usb_command(port, (void *)&req, sizeof(req)); } @@ -298,11 +302,11 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index, struct typec_mux_desc mux_desc = { }; int ret; - ret = fwnode_property_read_u8(fwnode, "usb2-port", &port->usb2_port); + ret = fwnode_property_read_u8(fwnode, "usb2-port-number", &port->usb2_port); if (ret) return ret; - ret = fwnode_property_read_u8(fwnode, "usb3-port", &port->usb3_port); + ret = fwnode_property_read_u8(fwnode, "usb3-port-number", &port->usb3_port); if (ret) return ret;
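The final hunk renames the per-port properties to "usb2-port-number" and "usb3-port-number". fwnode_property_read_u8() returns 0 on success and a negative errno when a property is missing or malformed, so pmc_usb_register_port() fails registration rather than silently defaulting. A small sketch of that lookup pattern with a hypothetical helper name; only the fwnode API and the property strings come from the diff:

#include <linux/property.h>

static int demo_read_port_numbers(struct fwnode_handle *fwnode,
				  u8 *usb2, u8 *usb3)
{
	int ret;

	/* Both properties are required; propagate the errno upward. */
	ret = fwnode_property_read_u8(fwnode, "usb2-port-number", usb2);
	if (ret)
		return ret;

	return fwnode_property_read_u8(fwnode, "usb3-port-number", usb3);
}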