Diffstat (limited to 'drivers')
800 files changed, 19889 insertions, 6462 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index ecb743bf05a5..536562c626a2 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -24,7 +24,7 @@ acpi-y += nvs.o # Power management related files acpi-y += wakeup.o acpi-y += sleep.o -acpi-$(CONFIG_PM) += device_pm.o +acpi-y += device_pm.o acpi-$(CONFIG_ACPI_SLEEP) += proc.o @@ -38,7 +38,6 @@ acpi-y += processor_core.o acpi-y += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-y += pci_root.o pci_link.o pci_irq.o -acpi-y += csrt.o acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o acpi-y += acpi_platform.o acpi-y += power.o diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 00d2efd674df..4f4e741d34b2 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -28,6 +28,8 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/types.h> +#include <linux/dmi.h> +#include <linux/delay.h> #ifdef CONFIG_ACPI_PROCFS_POWER #include <linux/proc_fs.h> #include <linux/seq_file.h> @@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); +static int ac_sleep_before_get_state_ms; + static struct acpi_driver acpi_ac_driver = { .name = "ac", .class = ACPI_AC_CLASS, @@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) case ACPI_AC_NOTIFY_STATUS: case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: + /* + * A buggy BIOS may notify AC first and then sleep for + * a specific time before doing actual operations in the + * EC event handler (_Qxx). This will cause the AC state + * reported by the ACPI event to be incorrect, so wait for a + * specific time for the EC event handler to make progress. + */ + if (ac_sleep_before_get_state_ms > 0) + msleep(ac_sleep_before_get_state_ms); + acpi_ac_get_state(ac); acpi_bus_generate_proc_event(device, event, (u32) ac->state); acpi_bus_generate_netlink_event(device->pnp.device_class, @@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) return; } +static int thinkpad_e530_quirk(const struct dmi_system_id *d) +{ + ac_sleep_before_get_state_ms = 1000; + return 0; +} + +static struct dmi_system_id ac_dmi_table[] = { + { + .callback = thinkpad_e530_quirk, + .ident = "thinkpad e530", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), + }, + }, + {}, +}; + static int acpi_ac_add(struct acpi_device *device) { int result = 0; @@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device) kfree(ac); } + dmi_check_system(ac_dmi_table); return result; } diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index b1c95422ce74..652fd5ce303c 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -35,11 +35,16 @@ ACPI_MODULE_NAME("acpi_lpss"); struct lpss_device_desc { bool clk_required; - const char *clk_parent; + const char *clkdev_name; bool ltr_required; unsigned int prv_offset; }; +static struct lpss_device_desc lpss_dma_desc = { + .clk_required = true, + .clkdev_name = "hclk", +}; + struct lpss_private_data { void __iomem *mmio_base; resource_size_t mmio_size; @@ -49,7 +54,6 @@ struct lpss_private_data { static struct lpss_device_desc lpt_dev_desc = { .clk_required = true, - .clk_parent = "lpss_clk", .prv_offset = 0x800, .ltr_required = true, }; @@ -60,6 +64,9 @@ static struct lpss_device_desc lpt_sdio_dev_desc = { }; static const struct acpi_device_id acpi_lpss_device_ids[] = { + /* Generic LPSS devices */ + { "INTL9C60", (unsigned long)&lpss_dma_desc }, + /* Lynxpoint LPSS 
devices */ { "INT33C0", (unsigned long)&lpt_dev_desc }, { "INT33C1", (unsigned long)&lpt_dev_desc }, @@ -91,16 +98,27 @@ static int register_device_clock(struct acpi_device *adev, struct lpss_private_data *pdata) { const struct lpss_device_desc *dev_desc = pdata->dev_desc; + struct lpss_clk_data *clk_data; if (!lpss_clk_dev) lpt_register_clock_device(); - if (!dev_desc->clk_parent || !pdata->mmio_base + clk_data = platform_get_drvdata(lpss_clk_dev); + if (!clk_data) + return -ENODEV; + + if (dev_desc->clkdev_name) { + clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name, + dev_name(&adev->dev)); + return 0; + } + + if (!pdata->mmio_base || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) return -ENODATA; pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev), - dev_desc->clk_parent, 0, + clk_data->name, 0, pdata->mmio_base + dev_desc->prv_offset, 0, 0, NULL); if (IS_ERR(pdata->clk)) diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c index fefc2ca7cc3e..33dc6a004802 100644 --- a/drivers/acpi/apei/cper.c +++ b/drivers/acpi/apei/cper.c @@ -250,10 +250,6 @@ static const char *cper_pcie_port_type_strs[] = { static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, const struct acpi_hest_generic_data *gdata) { -#ifdef CONFIG_ACPI_APEI_PCIEAER - struct pci_dev *dev; -#endif - if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? @@ -285,20 +281,6 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk( "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); -#ifdef CONFIG_ACPI_APEI_PCIEAER - dev = pci_get_domain_bus_and_slot(pcie->device_id.segment, - pcie->device_id.bus, pcie->device_id.function); - if (!dev) { - pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n", - pcie->device_id.segment, pcie->device_id.bus, - pcie->device_id.slot, pcie->device_id.function); - return; - } - if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) - cper_print_aer(pfx, dev, gdata->error_severity, - (struct aer_capability_regs *) pcie->aer_info); - pci_dev_put(dev); -#endif } static const char *apei_estatus_section_flag_strs[] = { diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index d668a8ae602b..fcd7d91cec34 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -454,7 +454,9 @@ static void ghes_do_proc(struct ghes *ghes, aer_severity = cper_severity_to_aer(sev); aer_recover_queue(pcie_err->device_id.segment, pcie_err->device_id.bus, - devfn, aer_severity); + devfn, aer_severity, + (struct aer_capability_regs *) + pcie_err->aer_info); } } @@ -917,13 +919,14 @@ static int ghes_probe(struct platform_device *ghes_dev) break; case ACPI_HEST_NOTIFY_EXTERNAL: /* External interrupt vector is GSI */ - if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) { + rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq); + if (rc) { pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", generic->header.source_id); goto err_edac_unreg; } - if (request_irq(ghes->irq, ghes_irq_func, - 0, "GHES IRQ", ghes)) { + rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes); + if (rc) { pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", generic->header.source_id); goto err_edac_unreg; diff --git a/drivers/acpi/csrt.c b/drivers/acpi/csrt.c deleted file mode 100644 index 
5c15a91faf0b..000000000000 --- a/drivers/acpi/csrt.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Support for Core System Resources Table (CSRT) - * - * Copyright (C) 2013, Intel Corporation - * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> - * Andy Shevchenko <andriy.shevchenko@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#define pr_fmt(fmt) "ACPI: CSRT: " fmt - -#include <linux/acpi.h> -#include <linux/device.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/sizes.h> - -ACPI_MODULE_NAME("CSRT"); - -static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev, - const struct acpi_csrt_group *grp) -{ - const struct acpi_csrt_shared_info *si; - struct resource res[3]; - size_t nres; - int ret; - - memset(res, 0, sizeof(res)); - nres = 0; - - si = (const struct acpi_csrt_shared_info *)&grp[1]; - /* - * The peripherals that are listed on CSRT typically support only - * 32-bit addresses so we only use the low part of MMIO base for - * now. - */ - if (!si->mmio_base_high && si->mmio_base_low) { - /* - * There is no size of the memory resource in shared_info - * so we assume that it is 4k here. - */ - res[nres].start = si->mmio_base_low; - res[nres].end = res[0].start + SZ_4K - 1; - res[nres++].flags = IORESOURCE_MEM; - } - - if (si->gsi_interrupt) { - int irq = acpi_register_gsi(NULL, si->gsi_interrupt, - si->interrupt_mode, - si->interrupt_polarity); - res[nres].start = irq; - res[nres].end = irq; - res[nres++].flags = IORESOURCE_IRQ; - } - - if (si->base_request_line || si->num_handshake_signals) { - /* - * We pass the driver a DMA resource describing the range - * of request lines the device supports. - */ - res[nres].start = si->base_request_line; - res[nres].end = res[nres].start + si->num_handshake_signals - 1; - res[nres++].flags = IORESOURCE_DMA; - } - - ret = platform_device_add_resources(pdev, res, nres); - if (ret) { - if (si->gsi_interrupt) - acpi_unregister_gsi(si->gsi_interrupt); - return ret; - } - - return 0; -} - -static int __init -acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp) -{ - struct platform_device *pdev; - char vendor[5], name[16]; - int ret, i; - - vendor[0] = grp->vendor_id; - vendor[1] = grp->vendor_id >> 8; - vendor[2] = grp->vendor_id >> 16; - vendor[3] = grp->vendor_id >> 24; - vendor[4] = '\0'; - - if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info)) - return -ENODEV; - - snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id); - pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO); - if (!pdev) - return -ENOMEM; - - /* Add resources based on the shared info */ - ret = acpi_csrt_parse_shared_info(pdev, grp); - if (ret) - goto fail; - - ret = platform_device_add(pdev); - if (ret) - goto fail; - - for (i = 0; i < pdev->num_resources; i++) - dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]); - - return 0; - -fail: - platform_device_put(pdev); - return ret; -} - -/* - * CSRT or Core System Resources Table is a proprietary ACPI table - * introduced by Microsoft. This table can contain devices that are not in - * the system DSDT table. In particular DMA controllers might be described - * here. - * - * We present these devices as normal platform devices that don't have ACPI - * IDs or handle. 
The platform device name will be something like - * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto. - */ -void __init acpi_csrt_init(void) -{ - struct acpi_csrt_group *grp, *end; - struct acpi_table_csrt *csrt; - acpi_status status; - int ret; - - status = acpi_get_table(ACPI_SIG_CSRT, 0, - (struct acpi_table_header **)&csrt); - if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) - pr_warn("failed to get the CSRT table\n"); - return; - } - - pr_debug("parsing CSRT table for devices\n"); - - grp = (struct acpi_csrt_group *)(csrt + 1); - end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length); - - while (grp < end) { - ret = acpi_csrt_parse_resource_group(grp); - if (ret) { - pr_warn("error in parsing resource group: %d\n", ret); - return; - } - - grp = (struct acpi_csrt_group *)((void *)grp + grp->length); - } -} diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 96de787e6104..318fa32a141e 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -37,68 +37,6 @@ #define _COMPONENT ACPI_POWER_COMPONENT ACPI_MODULE_NAME("device_pm"); -static DEFINE_MUTEX(acpi_pm_notifier_lock); - -/** - * acpi_add_pm_notifier - Register PM notifier for given ACPI device. - * @adev: ACPI device to add the notifier for. - * @context: Context information to pass to the notifier routine. - * - * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of - * PM wakeup events. For example, wakeup events may be generated for bridges - * if one of the devices below the bridge is signaling wakeup, even if the - * bridge itself doesn't have a wakeup GPE associated with it. - */ -acpi_status acpi_add_pm_notifier(struct acpi_device *adev, - acpi_notify_handler handler, void *context) -{ - acpi_status status = AE_ALREADY_EXISTS; - - mutex_lock(&acpi_pm_notifier_lock); - - if (adev->wakeup.flags.notifier_present) - goto out; - - status = acpi_install_notify_handler(adev->handle, - ACPI_SYSTEM_NOTIFY, - handler, context); - if (ACPI_FAILURE(status)) - goto out; - - adev->wakeup.flags.notifier_present = true; - - out: - mutex_unlock(&acpi_pm_notifier_lock); - return status; -} - -/** - * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. - * @adev: ACPI device to remove the notifier from. - */ -acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, - acpi_notify_handler handler) -{ - acpi_status status = AE_BAD_PARAMETER; - - mutex_lock(&acpi_pm_notifier_lock); - - if (!adev->wakeup.flags.notifier_present) - goto out; - - status = acpi_remove_notify_handler(adev->handle, - ACPI_SYSTEM_NOTIFY, - handler); - if (ACPI_FAILURE(status)) - goto out; - - adev->wakeup.flags.notifier_present = false; - - out: - mutex_unlock(&acpi_pm_notifier_lock); - return status; -} - /** * acpi_power_state_string - String representation of ACPI device power state. * @state: ACPI device power state to return the string representation of. @@ -340,11 +278,13 @@ int acpi_bus_init_power(struct acpi_device *device) if (result) return result; } else if (state == ACPI_STATE_UNKNOWN) { - /* No power resources and missing _PSC? Try to force D0. */ + /* + * No power resources and missing _PSC? Cross fingers and make + * it D0 in hope that this is what the BIOS put the device into. + * [We tried to force D0 here by executing _PS0, but that broke + * Toshiba P870-303 in a nasty way.] 
+ */ state = ACPI_STATE_D0; - result = acpi_dev_pm_explicit_set(device, state); - if (result) - return result; } device->power.state = state; return 0; @@ -385,6 +325,69 @@ bool acpi_bus_power_manageable(acpi_handle handle) } EXPORT_SYMBOL(acpi_bus_power_manageable); +#ifdef CONFIG_PM +static DEFINE_MUTEX(acpi_pm_notifier_lock); + +/** + * acpi_add_pm_notifier - Register PM notifier for given ACPI device. + * @adev: ACPI device to add the notifier for. + * @context: Context information to pass to the notifier routine. + * + * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of + * PM wakeup events. For example, wakeup events may be generated for bridges + * if one of the devices below the bridge is signaling wakeup, even if the + * bridge itself doesn't have a wakeup GPE associated with it. + */ +acpi_status acpi_add_pm_notifier(struct acpi_device *adev, + acpi_notify_handler handler, void *context) +{ + acpi_status status = AE_ALREADY_EXISTS; + + mutex_lock(&acpi_pm_notifier_lock); + + if (adev->wakeup.flags.notifier_present) + goto out; + + status = acpi_install_notify_handler(adev->handle, + ACPI_SYSTEM_NOTIFY, + handler, context); + if (ACPI_FAILURE(status)) + goto out; + + adev->wakeup.flags.notifier_present = true; + + out: + mutex_unlock(&acpi_pm_notifier_lock); + return status; +} + +/** + * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. + * @adev: ACPI device to remove the notifier from. + */ +acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, + acpi_notify_handler handler) +{ + acpi_status status = AE_BAD_PARAMETER; + + mutex_lock(&acpi_pm_notifier_lock); + + if (!adev->wakeup.flags.notifier_present) + goto out; + + status = acpi_remove_notify_handler(adev->handle, + ACPI_SYSTEM_NOTIFY, + handler); + if (ACPI_FAILURE(status)) + goto out; + + adev->wakeup.flags.notifier_present = false; + + out: + mutex_unlock(&acpi_pm_notifier_lock); + return status; +} + bool acpi_bus_can_wakeup(acpi_handle handle) { struct acpi_device *device; @@ -1023,3 +1026,4 @@ void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev) mutex_unlock(&adev->physical_node_lock); } EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent); +#endif /* CONFIG_PM */ diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d45b2871d33b..edc00818c803 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) static int ec_poll(struct acpi_ec *ec) { unsigned long flags; - int repeat = 2; /* number of command restarts */ + int repeat = 5; /* number of command restarts */ while (repeat--) { unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); @@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec) } advance_transaction(ec, acpi_ec_read_status(ec)); } while (time_before(jiffies, delay)); - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) - break; pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->lock, flags); start_transaction(ec); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 6f1afd9118c8..297cbf456f86 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -35,7 +35,6 @@ void acpi_pci_link_init(void); void acpi_pci_root_hp_init(void); void acpi_platform_init(void); int acpi_sysfs_init(void); -void acpi_csrt_init(void); #ifdef CONFIG_ACPI_CONTAINER void acpi_container_init(void); #else diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 1dd6f6c85874..e427dc516c76 100644 --- 
a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -641,7 +641,9 @@ static void _handle_hotplug_event_root(struct work_struct *work) /* bus enumerate */ printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__, (char *)buffer.pointer); - if (!root) + if (root) + acpiphp_check_host_bridge(handle); + else handle_root_bridge_insertion(handle); break; diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index bec717ffd25f..c266cdc11784 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, processor_device_ids); -static SIMPLE_DEV_PM_OPS(acpi_processor_pm, - acpi_processor_suspend, acpi_processor_resume); - static struct acpi_driver acpi_processor_driver = { .name = "processor", .class = ACPI_PROCESSOR_CLASS, @@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = { .remove = acpi_processor_remove, .notify = acpi_processor_notify, }, - .drv.pm = &acpi_processor_pm, }; #define INSTALL_NOTIFY_HANDLER 1 @@ -934,6 +930,8 @@ static int __init acpi_processor_init(void) if (result < 0) return result; + acpi_processor_syscore_init(); + acpi_processor_install_hotplug_notify(); acpi_thermal_cpufreq_init(); @@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void) acpi_processor_uninstall_hotplug_notify(); + acpi_processor_syscore_exit(); + acpi_bus_unregister_driver(&acpi_processor_driver); return; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index f0df2c9434d2..eb133c77aadb 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -34,6 +34,7 @@ #include <linux/sched.h> /* need_resched() */ #include <linux/clockchips.h> #include <linux/cpuidle.h> +#include <linux/syscore_ops.h> /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, #endif +#ifdef CONFIG_PM_SLEEP static u32 saved_bm_rld; -static void acpi_idle_bm_rld_save(void) +int acpi_processor_suspend(void) { acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); + return 0; } -static void acpi_idle_bm_rld_restore(void) + +void acpi_processor_resume(void) { u32 resumed_bm_rld; acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); + if (resumed_bm_rld == saved_bm_rld) + return; - if (resumed_bm_rld != saved_bm_rld) - acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); } -int acpi_processor_suspend(struct device *dev) +static struct syscore_ops acpi_processor_syscore_ops = { + .suspend = acpi_processor_suspend, + .resume = acpi_processor_resume, +}; + +void acpi_processor_syscore_init(void) { - acpi_idle_bm_rld_save(); - return 0; + register_syscore_ops(&acpi_processor_syscore_ops); } -int acpi_processor_resume(struct device *dev) +void acpi_processor_syscore_exit(void) { - acpi_idle_bm_rld_restore(); - return 0; + unregister_syscore_ops(&acpi_processor_syscore_ops); } +#endif /* CONFIG_PM_SLEEP */ #if defined(CONFIG_X86) static void tsc_check_state(int state) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index fe158fd4f1df..44225cb15f3a 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) acpi_set_pnp_ids(handle, &pnp, type); if (!pnp.type.hardware_id) - return; + goto out; /* * This 
relies on the fact that acpi_install_notify_handler() will not @@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) } } +out: acpi_free_pnp_ids(&pnp); } @@ -2042,7 +2043,6 @@ int __init acpi_scan_init(void) acpi_pci_link_init(); acpi_platform_init(); acpi_lpss_init(); - acpi_csrt_init(); acpi_container_init(); acpi_memory_hotplug_init(); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index c3932d0876e0..5d7075d25700 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -456,6 +456,30 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), }, }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Pavilion g6 Notebook PC", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"), + }, + }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP 1000 Notebook PC", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), + }, + }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Pavilion m4", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"), + }, + }, {} }; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 66f67626f02e..e6bd910bc6ed 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -161,6 +161,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), }, }, + { + .callback = video_detect_force_vendor, + .ident = "Asus UL30A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), + }, + }, { }, }; diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 4e94ba29cb8d..9d0cf019ce59 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -2,7 +2,7 @@ /* * acard-ahci.c - ACard AHCI SATA support * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 251e57d38942..2b50dfdf1cfc 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1,7 +1,7 @@ /* * ahci.c - AHCI SATA support * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -423,6 +423,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index b830e6c9fe49..10b14d45cfd2 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -1,7 +1,7 @@ /* * ahci.h - Common AHCI SATA definitions and declarations * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 2f48123d74c4..9a8a674e8fac 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -1,7 +1,7 @@ /* * ata_piix.c - Intel PATA/SATA controllers * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -151,6 +151,7 @@ enum piix_controller_ids { piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ ich8_sata_snb, ich8_2port_sata_snb, + ich8_2port_sata_byt, }; struct piix_map_db { @@ -334,6 +335,9 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Wellsburg) */ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (BayTrail) */ + { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, + { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, { } /* terminate list */ }; @@ -441,6 +445,7 @@ static const struct piix_map_db *piix_map_db_table[] = { [tolapai_sata] = &tolapai_map_db, [ich8_sata_snb] = &ich8_map_db, [ich8_2port_sata_snb] = &ich8_2port_map_db, + [ich8_2port_sata_byt] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { @@ -1254,6 +1259,16 @@ static struct ata_port_info piix_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &piix_sata_ops, }, + + [ich8_2port_sata_byt] = + { + .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &piix_sata_ops, + }, + }; #define AHCI_PCI_BAR 5 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 34c82167b962..a70ff154f586 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1,7 +1,7 @@ /* * libahci.c - Common AHCI SATA low-level routines * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 63c743baf920..f2184276539d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1,7 +1,7 @@ /* * libata-core.c - helper library for ATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -1602,6 +1602,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, qc->tf = *tf; if (cdb) memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); + + /* some SATA bridges need us to indicate data xfer direction */ + if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && + dma_dir == DMA_FROM_DEVICE) + qc->tf.feature |= ATAPI_DMADIR; + qc->flags |= ATA_QCFLAG_RESULT_TF; qc->dma_dir = dma_dir; if (dma_dir != DMA_NONE) { diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f9476fb3ac43..c69fcce505c0 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1,7 +1,7 @@ /* * libata-eh.c - libata error handling * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index dd310b27b24c..0101af541436 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1,7 +1,7 @@ /* * libata-scsi.c - helper library for ATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index d8af325a6bda..b603720b877d 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1,7 +1,7 @@ /* * libata-sff.c - helper library for PCI IDE BMDMA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index c1bfaf43d109..980b88e109fc 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -933,11 +933,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev) } mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem_res) { - err = -ENXIO; - goto err_rel_gpio; - } - ide_base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(ide_base)) { err = PTR_ERR(ide_base); diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 505333340ad5..8ea6e6afd041 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -1,7 +1,7 @@ /* * pdc_adma.c - Pacific Digital Corporation ADMA * - * Maintained by: Mark Lord <mlord@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * * Copyright 2005 Mark Lord * diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index fb0dd87f8893..958ba2a420c3 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -1,7 +1,7 @@ /* * sata_promise.c - Promise SATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Mikael Pettersson <mikpe@it.uu.se> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 4799868bd733..249c8a289bfd 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -549,6 +549,7 @@ static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc) /* start host DMA transaction */ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG); + dmactl &= ~ATAPI_CONTROL1_STOP; dmactl |= ATAPI_CONTROL1_START; iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG); } @@ -618,17 +619,16 @@ static struct ata_port_operations sata_rcar_port_ops = { .bmdma_status = sata_rcar_bmdma_status, }; -static int sata_rcar_serr_interrupt(struct ata_port *ap) +static void sata_rcar_serr_interrupt(struct ata_port *ap) { struct sata_rcar_priv *priv = ap->host->private_data; struct ata_eh_info *ehi = &ap->link.eh_info; int freeze = 0; - int handled = 0; u32 serror; serror = ioread32(priv->base + SCRSERR_REG); if (!serror) - return 0; + return; DPRINTK("SError @host_intr: 0x%x\n", serror); @@ -641,7 +641,6 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap) ata_ehi_push_desc(ehi, "%s", "hotplug"); freeze = serror & SERR_COMM_WAKE ? 
0 : 1; - handled = 1; } /* freeze or abort */ @@ -649,11 +648,9 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap) ata_port_freeze(ap); else ata_port_abort(ap); - - return handled; } -static int sata_rcar_ata_interrupt(struct ata_port *ap) +static void sata_rcar_ata_interrupt(struct ata_port *ap) { struct ata_queued_cmd *qc; int handled = 0; @@ -662,7 +659,9 @@ static int sata_rcar_ata_interrupt(struct ata_port *ap) if (qc) handled |= ata_bmdma_port_intr(ap, qc); - return handled; + /* be sure to clear ATA interrupt */ + if (!handled) + sata_rcar_check_status(ap); } static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) @@ -677,20 +676,21 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) spin_lock_irqsave(&host->lock, flags); sataintstat = ioread32(priv->base + SATAINTSTAT_REG); + sataintstat &= SATA_RCAR_INT_MASK; if (!sataintstat) goto done; /* ack */ - iowrite32(sataintstat & ~SATA_RCAR_INT_MASK, - priv->base + SATAINTSTAT_REG); + iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG); ap = host->ports[0]; if (sataintstat & SATAINTSTAT_ATA) - handled |= sata_rcar_ata_interrupt(ap); + sata_rcar_ata_interrupt(ap); if (sataintstat & SATAINTSTAT_SERR) - handled |= sata_rcar_serr_interrupt(ap); + sata_rcar_serr_interrupt(ap); + handled = 1; done: spin_unlock_irqrestore(&host->lock, flags); diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index a7b31672c4b7..0ae3ca4bf5c0 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -1,7 +1,7 @@ /* * sata_sil.c - Silicon Image SATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 7b7127a58f51..9947010afc0f 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -1,7 +1,7 @@ /* * sata_sx4.c - Promise SATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 5913ea9d57b2..87f056e54a9d 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -1,7 +1,7 @@ /* * sata_via.c - VIA Serial ATA controllers * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 1a68f947ded8..d414331b480e 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -1295,6 +1295,7 @@ int subsys_virtual_register(struct bus_type *subsys, return subsys_register(subsys, groups, virtual_dir); } +EXPORT_SYMBOL_GPL(subsys_virtual_register); int __init buses_init(void) { diff --git a/drivers/base/core.c b/drivers/base/core.c index 016312437577..2499cefdcdf2 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -572,9 +572,11 @@ int device_create_file(struct device *dev, if (dev) { WARN(((attr->attr.mode & S_IWUGO) && !attr->store), - "Write permission without 'store'\n"); + "Attribute %s: write permission without 'store'\n", + attr->attr.name); WARN(((attr->attr.mode & S_IRUGO) && !attr->show), - "Read permission without 'show'\n"); + "Attribute %s: read permission without 'show'\n", + attr->attr.name); error = sysfs_create_file(&dev->kobj, &attr->attr); } diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 39c32529b833..5da914041305 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); int dev_pm_put_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 0; + int ret = 1; spin_lock_irq(&dev->power.lock); psd = dev_to_psd(dev); - if (!psd) { - ret = -EINVAL; + if (!psd) goto out; - } if (--psd->refcount == 0) { dev->power.subsys_data = NULL; - kfree(psd); - ret = 1; + } else { + psd = NULL; + ret = 0; } out: spin_unlock_irq(&dev->power.lock); + kfree(psd); return ret; } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index aa0875f6f1b7..02f490bad30f 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored) int registers = 0; int this_registers, average; - map->lock(map); + map->lock(map->lock_arg); mem_size = sizeof(*rbtree_ctx); mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long); @@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored) seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", nodes, registers, average, mem_size); - map->unlock(map); + map->unlock(map->lock_arg); return 0; } @@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { rbnode = rb_entry(node, struct regcache_rbtree_node, node); - if (rbnode->base_reg < min) - continue; if (rbnode->base_reg > max) break; if (rbnode->base_reg + rbnode->blklen < min) diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 75923f2396bd..507ee2da0f6e 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map) BUG_ON(!map->cache_ops || !map->cache_ops->sync); - map->lock(map); + map->lock(map->lock_arg); /* Remember the initial bypass state */ bypass = map->cache_bypass; dev_dbg(map->dev, "Syncing %s cache\n", @@ -306,7 +306,7 @@ out: trace_regcache_sync(map->dev, name, "stop"); /* Restore the bypass state */ map->cache_bypass = bypass; - map->unlock(map); + map->unlock(map->lock_arg); return ret; } @@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, BUG_ON(!map->cache_ops || !map->cache_ops->sync); - map->lock(map); + map->lock(map->lock_arg); /* Remember the initial bypass 
state */ bypass = map->cache_bypass; @@ -352,7 +352,7 @@ out: trace_regcache_sync(map->dev, name, "stop region"); /* Restore the bypass state */ map->cache_bypass = bypass; - map->unlock(map); + map->unlock(map->lock_arg); return ret; } @@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region); */ void regcache_cache_only(struct regmap *map, bool enable) { - map->lock(map); + map->lock(map->lock_arg); WARN_ON(map->cache_bypass && enable); map->cache_only = enable; trace_regmap_cache_only(map->dev, enable); - map->unlock(map); + map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_only); @@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only); */ void regcache_mark_dirty(struct regmap *map) { - map->lock(map); + map->lock(map->lock_arg); map->cache_dirty = true; - map->unlock(map); + map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_mark_dirty); @@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty); */ void regcache_cache_bypass(struct regmap *map, bool enable) { - map->lock(map); + map->lock(map->lock_arg); WARN_ON(map->cache_only && enable); map->cache_bypass = enable; trace_regmap_cache_bypass(map->dev, enable); - map->unlock(map); + map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_bypass); diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 23b701f5fd2f..975719bc3450 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file, char *start = buf; unsigned long reg, value; struct regmap *map = file->private_data; + int ret; buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) @@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file, /* Userspace has been fiddling around behind the kernel's back */ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); - regmap_write(map, reg, value); + ret = regmap_write(map, reg, value); + if (ret < 0) + return ret; return buf_size; } #else diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index bca9c80056fe..8bffa5c9818c 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -84,6 +84,8 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = { { BCMA_CORE_I2S, "I2S" }, { BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" }, { BCMA_CORE_SHIM, "SHIM" }, + { BCMA_CORE_PCIE2, "PCIe Gen2" }, + { BCMA_CORE_ARM_CR4, "ARM CR4" }, { BCMA_CORE_DEFAULT, "Default" }, }; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index f1a29f8e9d33..9bf4371755f2 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) spin_lock(&brd->brd_lock); idx = sector >> PAGE_SECTORS_SHIFT; + page->index = idx; if (radix_tree_insert(&brd->brd_pages, idx, page)) { __free_page(page); page = radix_tree_lookup(&brd->brd_pages, idx); BUG_ON(!page); BUG_ON(page->index != idx); - } else - page->index = idx; + } spin_unlock(&brd->brd_lock); radix_tree_preload_end(); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 6374dc103521..62b6c2cc80b5 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id); static int cciss_open(struct block_device *bdev, fmode_t mode); static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); static void cciss_release(struct gendisk *disk, fmode_t mode); -static int do_ioctl(struct 
block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg); static int cciss_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); @@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = { .owner = THIS_MODULE, .open = cciss_unlocked_open, .release = cciss_release, - .ioctl = do_ioctl, + .ioctl = cciss_ioctl, .getgeo = cciss_getgeo, #ifdef CONFIG_COMPAT .compat_ioctl = cciss_compat_ioctl, @@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode) mutex_unlock(&cciss_mutex); } -static int do_ioctl(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long arg) -{ - int ret; - mutex_lock(&cciss_mutex); - ret = cciss_ioctl(bdev, mode, cmd, arg); - mutex_unlock(&cciss_mutex); - return ret; -} - #ifdef CONFIG_COMPAT static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, @@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, case CCISS_REGNEWD: case CCISS_RESCANDISK: case CCISS_GETLUNINFO: - return do_ioctl(bdev, mode, cmd, arg); + return cciss_ioctl(bdev, mode, cmd, arg); case CCISS_PASSTHRU32: return cciss_ioctl32_passthru(bdev, mode, cmd, arg); @@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, if (err) return -EFAULT; - err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); + err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); if (err) return err; err |= @@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, if (err) return -EFAULT; - err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); + err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); if (err) return err; err |= @@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp) static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) { cciss_coalint_struct intinfo; + unsigned long flags; if (!argp) return -EINVAL; + spin_lock_irqsave(&h->lock, flags); intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); + spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user (argp, &intinfo, sizeof(cciss_coalint_struct))) return -EFAULT; @@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp) static int cciss_getnodename(ctlr_info_t *h, void __user *argp) { NodeName_type NodeName; + unsigned long flags; int i; if (!argp) return -EINVAL; + spin_lock_irqsave(&h->lock, flags); for (i = 0; i < 16; i++) NodeName[i] = readb(&h->cfgtable->ServerName[i]); + spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) return -EFAULT; return 0; @@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp) static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) { Heartbeat_type heartbeat; + unsigned long flags; if (!argp) return -EINVAL; + spin_lock_irqsave(&h->lock, flags); heartbeat = readl(&h->cfgtable->HeartBeat); + spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) return -EFAULT; return 0; @@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) { BusTypes_type BusTypes; + unsigned long flags; if (!argp) return -EINVAL; + 
spin_lock_irqsave(&h->lock, flags); BusTypes = readl(&h->cfgtable->BusTypes); + spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) return -EFAULT; return 0; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 847107ef0cce..20dd52a2f92f 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd) static void mtip_hw_debugfs_exit(struct driver_data *dd) { - debugfs_remove_recursive(dd->dfs_node); + if (dd->dfs_node) + debugfs_remove_recursive(dd->dfs_node); } @@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) struct driver_data *dd = queue->queuedata; struct scatterlist *sg; struct bio_vec *bvec; - int nents = 0; + int i, nents = 0; int tag = 0, unaligned = 0; if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { @@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) } /* Create the scatter list for this bio. */ - bio_for_each_segment(bvec, bio, nents) { + bio_for_each_segment(bvec, bio, i) { sg_set_page(&sg[nents], bvec->bv_page, bvec->bv_len, bvec->bv_offset); + nents++; } /* Issue the read/write. */ diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 8efdfaa44a59..ce79a590b45b 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, struct nvme_command *cmnd; struct nvme_iod *iod; enum dma_data_direction dma_dir; - int cmdid, length, result = -ENOMEM; + int cmdid, length, result; u16 control; u32 dsmgmt; int psegs = bio_phys_segments(ns->queue, bio); @@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, return result; } + result = -ENOMEM; iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); if (!iod) goto nomem; @@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) if (timeout && !time_after(now, info[cmdid].timeout)) continue; + if (info[cmdid].ctx == CMD_CTX_CANCELLED) + continue; dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); ctx = cancel_cmdid(nvmeq, cmdid, &fn); fn(nvmeq->dev, ctx, &cqe); @@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, if (addr & 3) return ERR_PTR(-EINVAL); - if (!length) + if (!length || length > INT_MAX - PAGE_SIZE) return ERR_PTR(-EINVAL); offset = offset_in_page(addr); @@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, sg_init_table(sg, count); for (i = 0; i < count; i++) { sg_set_page(&sg[i], pages[i], - min_t(int, length, PAGE_SIZE - offset), offset); + min_t(unsigned, length, PAGE_SIZE - offset), + offset); length -= (PAGE_SIZE - offset); offset = 0; } @@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev, nvme_free_iod(dev, iod); } - if (!status && copy_to_user(&ucmd->result, &cmd.result, + if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result, sizeof(cmd.result))) status = -EFAULT; @@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count) static int nvme_setup_io_queues(struct nvme_dev *dev) { - int result, cpu, i, nr_io_queues, db_bar_size, q_depth; + struct pci_dev *pdev = dev->pci_dev; + int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count; nr_io_queues = num_online_cpus(); result = set_queue_count(dev, nr_io_queues); 
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (result < nr_io_queues) nr_io_queues = result; + q_count = nr_io_queues; /* Deregister the admin queue's interrupt */ free_irq(dev->entry[0].vector, dev->queues[0]); db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); if (db_bar_size > 8192) { iounmap(dev->bar); - dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), - db_bar_size); + dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size); dev->dbs = ((void __iomem *)dev->bar) + 4096; dev->queues[0]->q_db = dev->dbs; } @@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) for (i = 0; i < nr_io_queues; i++) dev->entry[i].entry = i; for (;;) { - result = pci_enable_msix(dev->pci_dev, dev->entry, - nr_io_queues); + result = pci_enable_msix(pdev, dev->entry, nr_io_queues); if (result == 0) { break; } else if (result > 0) { nr_io_queues = result; continue; } else { - nr_io_queues = 1; + nr_io_queues = 0; break; } } + if (nr_io_queues == 0) { + nr_io_queues = q_count; + for (;;) { + result = pci_enable_msi_block(pdev, nr_io_queues); + if (result == 0) { + for (i = 0; i < nr_io_queues; i++) + dev->entry[i].vector = i + pdev->irq; + break; + } else if (result > 0) { + nr_io_queues = result; + continue; + } else { + nr_io_queues = 1; + break; + } + } + } + result = queue_request_irq(dev, dev->queues[0], "nvme admin"); /* XXX: handle failure here */ @@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref) { struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); nvme_dev_remove(dev); - pci_disable_msix(dev->pci_dev); + if (dev->pci_dev->msi_enabled) + pci_disable_msi(dev->pci_dev); + else if (dev->pci_dev->msix_enabled) + pci_disable_msix(dev->pci_dev); iounmap(dev->bar); nvme_release_instance(dev); nvme_release_prp_pools(dev); @@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&dev->namespaces); dev->pci_dev = pdev; pci_set_drvdata(pdev, dev); - dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + else + goto disable; + result = nvme_set_instance(dev); if (result) goto disable; @@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) unmap: iounmap(dev->bar); disable_msix: - pci_disable_msix(pdev); + if (dev->pci_dev->msi_enabled) + pci_disable_msi(dev->pci_dev); + else if (dev->pci_dev->msix_enabled) + pci_disable_msix(dev->pci_dev); nvme_release_instance(dev); nvme_release_prp_pools(dev); disable: diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index fed54b039893..102de2f52b5c 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -44,7 +44,6 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/types.h> -#include <linux/version.h> #include <scsi/sg.h> #include <scsi/scsi.h> @@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, } } -static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, +static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 *mode_page, u8 page_code) { int res = SNTI_TRANSLATION_SUCCESS; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 
3c08983e600a..f5d0ea11d9fd 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -83,7 +83,8 @@ #define MAX_SPEED 0xffff -#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) +#define ZONE(sector, pd) (((sector) + (pd)->offset) & \ + ~(sector_t)((pd)->settings.size - 1)) static DEFINE_MUTEX(pktcdvd_mutex); static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ca63104136e0..3063452e55da 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -55,6 +55,39 @@ #define SECTOR_SHIFT 9 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) +/* + * Increment the given counter and return its updated value. + * If the counter is already 0 it will not be incremented. + * If the counter is already at its maximum value returns + * -EINVAL without updating it. + */ +static int atomic_inc_return_safe(atomic_t *v) +{ + unsigned int counter; + + counter = (unsigned int)__atomic_add_unless(v, 1, 0); + if (counter <= (unsigned int)INT_MAX) + return (int)counter; + + atomic_dec(v); + + return -EINVAL; +} + +/* Decrement the counter. Return the resulting value, or -EINVAL */ +static int atomic_dec_return_safe(atomic_t *v) +{ + int counter; + + counter = atomic_dec_return(v); + if (counter >= 0) + return counter; + + atomic_inc(v); + + return -EINVAL; +} + #define RBD_DRV_NAME "rbd" #define RBD_DRV_NAME_LONG "rbd (rados block device)" @@ -100,21 +133,20 @@ * block device image metadata (in-memory version) */ struct rbd_image_header { - /* These four fields never change for a given rbd image */ + /* These six fields never change for a given rbd image */ char *object_prefix; - u64 features; __u8 obj_order; __u8 crypt_type; __u8 comp_type; + u64 stripe_unit; + u64 stripe_count; + u64 features; /* Might be changeable someday? 
*/ /* The remaining fields need to be updated occasionally */ u64 image_size; struct ceph_snap_context *snapc; - char *snap_names; - u64 *snap_sizes; - - u64 stripe_unit; - u64 stripe_count; + char *snap_names; /* format 1 only */ + u64 *snap_sizes; /* format 1 only */ }; /* @@ -225,6 +257,7 @@ struct rbd_obj_request { }; }; struct page **copyup_pages; + u32 copyup_page_count; struct ceph_osd_request *osd_req; @@ -257,6 +290,7 @@ struct rbd_img_request { struct rbd_obj_request *obj_request; /* obj req initiator */ }; struct page **copyup_pages; + u32 copyup_page_count; spinlock_t completion_lock;/* protects next_completion */ u32 next_completion; rbd_img_callback_t callback; @@ -311,6 +345,7 @@ struct rbd_device { struct rbd_spec *parent_spec; u64 parent_overlap; + atomic_t parent_ref; struct rbd_device *parent; /* protects updating the header */ @@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count); -static int rbd_dev_image_probe(struct rbd_device *rbd_dev); +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); +static void rbd_spec_put(struct rbd_spec *spec); static struct bus_attribute rbd_bus_attrs[] = { __ATTR(add, S_IWUSR, NULL, rbd_add), @@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); static int rbd_dev_refresh(struct rbd_device *rbd_dev); -static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); +static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); +static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev); static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u64 snap_id); static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, @@ -482,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = { }; /* - * Initialize an rbd client instance. - * We own *ceph_opts. + * Initialize an rbd client instance. Success or not, this function + * consumes ceph_opts. */ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) { @@ -638,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private) /* * Get a ceph client with specific addr and configuration, if one does - * not exist create it. + * not exist create it. Either way, ceph_opts is consumed by this + * function. */ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) { @@ -726,88 +764,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) } /* - * Create a new header structure, translate header format from the on-disk - * header. + * Fill an rbd image header with information from the given format 1 + * on-disk header. 
*/ -static int rbd_header_from_disk(struct rbd_image_header *header, +static int rbd_header_from_disk(struct rbd_device *rbd_dev, struct rbd_image_header_ondisk *ondisk) { + struct rbd_image_header *header = &rbd_dev->header; + bool first_time = header->object_prefix == NULL; + struct ceph_snap_context *snapc; + char *object_prefix = NULL; + char *snap_names = NULL; + u64 *snap_sizes = NULL; u32 snap_count; - size_t len; size_t size; + int ret = -ENOMEM; u32 i; - memset(header, 0, sizeof (*header)); + /* Allocate this now to avoid having to handle failure below */ - snap_count = le32_to_cpu(ondisk->snap_count); + if (first_time) { + size_t len; - len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); - header->object_prefix = kmalloc(len + 1, GFP_KERNEL); - if (!header->object_prefix) - return -ENOMEM; - memcpy(header->object_prefix, ondisk->object_prefix, len); - header->object_prefix[len] = '\0'; + len = strnlen(ondisk->object_prefix, + sizeof (ondisk->object_prefix)); + object_prefix = kmalloc(len + 1, GFP_KERNEL); + if (!object_prefix) + return -ENOMEM; + memcpy(object_prefix, ondisk->object_prefix, len); + object_prefix[len] = '\0'; + } + + /* Allocate the snapshot context and fill it in */ + snap_count = le32_to_cpu(ondisk->snap_count); + snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); + if (!snapc) + goto out_err; + snapc->seq = le64_to_cpu(ondisk->snap_seq); if (snap_count) { + struct rbd_image_snap_ondisk *snaps; u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); - /* Save a copy of the snapshot names */ + /* We'll keep a copy of the snapshot names... */ - if (snap_names_len > (u64) SIZE_MAX) - return -EIO; - header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); - if (!header->snap_names) + if (snap_names_len > (u64)SIZE_MAX) + goto out_2big; + snap_names = kmalloc(snap_names_len, GFP_KERNEL); + if (!snap_names) + goto out_err; + + /* ...as well as the array of their sizes. */ + + size = snap_count * sizeof (*header->snap_sizes); + snap_sizes = kmalloc(size, GFP_KERNEL); + if (!snap_sizes) goto out_err; + /* - * Note that rbd_dev_v1_header_read() guarantees - * the ondisk buffer we're working with has + * Copy the names, and fill in each snapshot's id + * and size. + * + * Note that rbd_dev_v1_header_info() guarantees the + * ondisk buffer we're working with has * snap_names_len bytes beyond the end of the * snapshot id array, this memcpy() is safe. 
*/ - memcpy(header->snap_names, &ondisk->snaps[snap_count], - snap_names_len); + memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len); + snaps = ondisk->snaps; + for (i = 0; i < snap_count; i++) { + snapc->snaps[i] = le64_to_cpu(snaps[i].id); + snap_sizes[i] = le64_to_cpu(snaps[i].image_size); + } + } - /* Record each snapshot's size */ + /* We won't fail any more, fill in the header */ - size = snap_count * sizeof (*header->snap_sizes); - header->snap_sizes = kmalloc(size, GFP_KERNEL); - if (!header->snap_sizes) - goto out_err; - for (i = 0; i < snap_count; i++) - header->snap_sizes[i] = - le64_to_cpu(ondisk->snaps[i].image_size); + down_write(&rbd_dev->header_rwsem); + if (first_time) { + header->object_prefix = object_prefix; + header->obj_order = ondisk->options.order; + header->crypt_type = ondisk->options.crypt_type; + header->comp_type = ondisk->options.comp_type; + /* The rest aren't used for format 1 images */ + header->stripe_unit = 0; + header->stripe_count = 0; + header->features = 0; } else { - header->snap_names = NULL; - header->snap_sizes = NULL; + ceph_put_snap_context(header->snapc); + kfree(header->snap_names); + kfree(header->snap_sizes); } - header->features = 0; /* No features support in v1 images */ - header->obj_order = ondisk->options.order; - header->crypt_type = ondisk->options.crypt_type; - header->comp_type = ondisk->options.comp_type; - - /* Allocate and fill in the snapshot context */ + /* The remaining fields always get updated (when we refresh) */ header->image_size = le64_to_cpu(ondisk->image_size); + header->snapc = snapc; + header->snap_names = snap_names; + header->snap_sizes = snap_sizes; - header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); - if (!header->snapc) - goto out_err; - header->snapc->seq = le64_to_cpu(ondisk->snap_seq); - for (i = 0; i < snap_count; i++) - header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); + /* Make sure mapping size is consistent with header info */ - return 0; + if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time) + if (rbd_dev->mapping.size != header->image_size) + rbd_dev->mapping.size = header->image_size; + up_write(&rbd_dev->header_rwsem); + + return 0; +out_2big: + ret = -EIO; out_err: - kfree(header->snap_sizes); - header->snap_sizes = NULL; - kfree(header->snap_names); - header->snap_names = NULL; - kfree(header->object_prefix); - header->object_prefix = NULL; + kfree(snap_sizes); + kfree(snap_names); + ceph_put_snap_context(snapc); + kfree(object_prefix); - return -ENOMEM; + return ret; } static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) @@ -934,20 +1007,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id, static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) { - const char *snap_name = rbd_dev->spec->snap_name; - u64 snap_id; + u64 snap_id = rbd_dev->spec->snap_id; u64 size = 0; u64 features = 0; int ret; - if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) { - snap_id = rbd_snap_id_by_name(rbd_dev, snap_name); - if (snap_id == CEPH_NOSNAP) - return -ENOENT; - } else { - snap_id = CEPH_NOSNAP; - } - ret = rbd_snap_size(rbd_dev, snap_id, &size); if (ret) return ret; @@ -958,11 +1022,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) rbd_dev->mapping.size = size; rbd_dev->mapping.features = features; - /* If we are mapping a snapshot it must be marked read-only */ - - if (snap_id != CEPH_NOSNAP) - rbd_dev->mapping.read_only = true; - return 0; } @@ -970,14 +1029,6 @@ static void rbd_dev_mapping_clear(struct 
rbd_device *rbd_dev) { rbd_dev->mapping.size = 0; rbd_dev->mapping.features = 0; - rbd_dev->mapping.read_only = true; -} - -static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev) -{ - rbd_dev->mapping.size = 0; - rbd_dev->mapping.features = 0; - rbd_dev->mapping.read_only = true; } static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) @@ -1342,20 +1393,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request) kref_put(&obj_request->kref, rbd_obj_request_destroy); } -static void rbd_img_request_get(struct rbd_img_request *img_request) -{ - dout("%s: img %p (was %d)\n", __func__, img_request, - atomic_read(&img_request->kref.refcount)); - kref_get(&img_request->kref); -} - +static bool img_request_child_test(struct rbd_img_request *img_request); +static void rbd_parent_request_destroy(struct kref *kref); static void rbd_img_request_destroy(struct kref *kref); static void rbd_img_request_put(struct rbd_img_request *img_request) { rbd_assert(img_request != NULL); dout("%s: img %p (was %d)\n", __func__, img_request, atomic_read(&img_request->kref.refcount)); - kref_put(&img_request->kref, rbd_img_request_destroy); + if (img_request_child_test(img_request)) + kref_put(&img_request->kref, rbd_parent_request_destroy); + else + kref_put(&img_request->kref, rbd_img_request_destroy); } static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, @@ -1472,6 +1521,12 @@ static void img_request_child_set(struct rbd_img_request *img_request) smp_mb(); } +static void img_request_child_clear(struct rbd_img_request *img_request) +{ + clear_bit(IMG_REQ_CHILD, &img_request->flags); + smp_mb(); +} + static bool img_request_child_test(struct rbd_img_request *img_request) { smp_mb(); @@ -1484,6 +1539,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request) smp_mb(); } +static void img_request_layered_clear(struct rbd_img_request *img_request) +{ + clear_bit(IMG_REQ_LAYERED, &img_request->flags); + smp_mb(); +} + static bool img_request_layered_test(struct rbd_img_request *img_request) { smp_mb(); @@ -1827,6 +1888,74 @@ static void rbd_obj_request_destroy(struct kref *kref) kmem_cache_free(rbd_obj_request_cache, obj_request); } +/* It's OK to call this for a device with no parent */ + +static void rbd_spec_put(struct rbd_spec *spec); +static void rbd_dev_unparent(struct rbd_device *rbd_dev) +{ + rbd_dev_remove_parent(rbd_dev); + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = NULL; + rbd_dev->parent_overlap = 0; +} + +/* + * Parent image reference counting is used to determine when an + * image's parent fields can be safely torn down--after there are no + * more in-flight requests to the parent image. When the last + * reference is dropped, cleaning them up is safe. + */ +static void rbd_dev_parent_put(struct rbd_device *rbd_dev) +{ + int counter; + + if (!rbd_dev->parent_spec) + return; + + counter = atomic_dec_return_safe(&rbd_dev->parent_ref); + if (counter > 0) + return; + + /* Last reference; clean up parent data structures */ + + if (!counter) + rbd_dev_unparent(rbd_dev); + else + rbd_warn(rbd_dev, "parent reference underflow\n"); +} + +/* + * If an image has a non-zero parent overlap, get a reference to its + * parent. + * + * We must get the reference before checking for the overlap to + * coordinate properly with zeroing the parent overlap in + * rbd_dev_v2_parent_info() when an image gets flattened. We + * drop it again if there is no overlap. 
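The parent reference counting introduced above can be modeled with plain C11 atomics. This is only a sketch: the real atomic_inc_return_safe()/atomic_dec_return_safe() helpers saturate instead of wrapping, and the overflow/underflow warnings are omitted here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the rbd_device fields used above. */
struct dev_state {
	bool has_parent_spec;
	unsigned long long parent_overlap;	/* becomes 0 when flattened */
	atomic_int parent_ref;
};

/* Take a parent reference only while an overlap is still in place. */
static bool parent_get(struct dev_state *d)
{
	if (!d->has_parent_spec)
		return false;
	if (atomic_fetch_add(&d->parent_ref, 1) + 1 > 0 && d->parent_overlap)
		return true;
	return false;	/* image was flattened; parent not yet torn down */
}

/* Drop a parent reference; the last put tears the parent state down. */
static void parent_put(struct dev_state *d)
{
	if (!d->has_parent_spec)
		return;
	if (atomic_fetch_sub(&d->parent_ref, 1) - 1 == 0)
		printf("last ref dropped: unparent the device\n");
}

int main(void)
{
	struct dev_state d = { true, 4ull << 20, 1 };	/* probe holds one ref */

	if (parent_get(&d))	/* an in-flight request against the parent */
		parent_put(&d);
	parent_put(&d);		/* the probe-time reference */
	return 0;
}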
+ * + * Returns true if the rbd device has a parent with a non-zero + * overlap and a reference for it was successfully taken, or + * false otherwise. + */ +static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) +{ + int counter; + + if (!rbd_dev->parent_spec) + return false; + + counter = atomic_inc_return_safe(&rbd_dev->parent_ref); + if (counter > 0 && rbd_dev->parent_overlap) + return true; + + /* Image was flattened, but parent is not yet torn down */ + + if (counter < 0) + rbd_warn(rbd_dev, "parent reference overflow\n"); + + return false; +} + /* * Caller is responsible for filling in the list of object requests * that comprises the image request, and the Linux request pointer @@ -1835,8 +1964,7 @@ static void rbd_obj_request_destroy(struct kref *kref) static struct rbd_img_request *rbd_img_request_create( struct rbd_device *rbd_dev, u64 offset, u64 length, - bool write_request, - bool child_request) + bool write_request) { struct rbd_img_request *img_request; @@ -1861,9 +1989,7 @@ static struct rbd_img_request *rbd_img_request_create( } else { img_request->snap_id = rbd_dev->spec->snap_id; } - if (child_request) - img_request_child_set(img_request); - if (rbd_dev->parent_spec) + if (rbd_dev_parent_get(rbd_dev)) img_request_layered_set(img_request); spin_lock_init(&img_request->completion_lock); img_request->next_completion = 0; @@ -1873,9 +1999,6 @@ static struct rbd_img_request *rbd_img_request_create( INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); - rbd_img_request_get(img_request); /* Avoid a warning */ - rbd_img_request_put(img_request); /* TEMPORARY */ - dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, write_request ? "write" : "read", offset, length, img_request); @@ -1897,15 +2020,54 @@ static void rbd_img_request_destroy(struct kref *kref) rbd_img_obj_request_del(img_request, obj_request); rbd_assert(img_request->obj_request_count == 0); + if (img_request_layered_test(img_request)) { + img_request_layered_clear(img_request); + rbd_dev_parent_put(img_request->rbd_dev); + } + if (img_request_write_test(img_request)) ceph_put_snap_context(img_request->snapc); - if (img_request_child_test(img_request)) - rbd_obj_request_put(img_request->obj_request); - kmem_cache_free(rbd_img_request_cache, img_request); } +static struct rbd_img_request *rbd_parent_request_create( + struct rbd_obj_request *obj_request, + u64 img_offset, u64 length) +{ + struct rbd_img_request *parent_request; + struct rbd_device *rbd_dev; + + rbd_assert(obj_request->img_request); + rbd_dev = obj_request->img_request->rbd_dev; + + parent_request = rbd_img_request_create(rbd_dev->parent, + img_offset, length, false); + if (!parent_request) + return NULL; + + img_request_child_set(parent_request); + rbd_obj_request_get(obj_request); + parent_request->obj_request = obj_request; + + return parent_request; +} + +static void rbd_parent_request_destroy(struct kref *kref) +{ + struct rbd_img_request *parent_request; + struct rbd_obj_request *orig_request; + + parent_request = container_of(kref, struct rbd_img_request, kref); + orig_request = parent_request->obj_request; + + parent_request->obj_request = NULL; + rbd_obj_request_put(orig_request); + img_request_child_clear(parent_request); + + rbd_img_request_destroy(kref); +} + static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; @@ -2114,7 +2276,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; struct 
rbd_device *rbd_dev; - u64 length; + struct page **pages; u32 page_count; rbd_assert(obj_request->type == OBJ_REQUEST_BIO); @@ -2124,12 +2286,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) rbd_dev = img_request->rbd_dev; rbd_assert(rbd_dev); - length = (u64)1 << rbd_dev->header.obj_order; - page_count = (u32)calc_pages_for(0, length); - rbd_assert(obj_request->copyup_pages); - ceph_release_page_vector(obj_request->copyup_pages, page_count); + pages = obj_request->copyup_pages; + rbd_assert(pages != NULL); obj_request->copyup_pages = NULL; + page_count = obj_request->copyup_page_count; + rbd_assert(page_count); + obj_request->copyup_page_count = 0; + ceph_release_page_vector(pages, page_count); /* * We want the transfer count to reflect the size of the @@ -2153,9 +2317,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) struct ceph_osd_client *osdc; struct rbd_device *rbd_dev; struct page **pages; - int result; - u64 obj_size; - u64 xferred; + u32 page_count; + int img_result; + u64 parent_length; + u64 offset; + u64 length; rbd_assert(img_request_child_test(img_request)); @@ -2164,46 +2330,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) pages = img_request->copyup_pages; rbd_assert(pages != NULL); img_request->copyup_pages = NULL; + page_count = img_request->copyup_page_count; + rbd_assert(page_count); + img_request->copyup_page_count = 0; orig_request = img_request->obj_request; rbd_assert(orig_request != NULL); - rbd_assert(orig_request->type == OBJ_REQUEST_BIO); - result = img_request->result; - obj_size = img_request->length; - xferred = img_request->xferred; + rbd_assert(obj_request_type_valid(orig_request->type)); + img_result = img_request->result; + parent_length = img_request->length; + rbd_assert(parent_length == img_request->xferred); + rbd_img_request_put(img_request); - rbd_dev = img_request->rbd_dev; + rbd_assert(orig_request->img_request); + rbd_dev = orig_request->img_request->rbd_dev; rbd_assert(rbd_dev); - rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order); - rbd_img_request_put(img_request); + /* + * If the overlap has become 0 (most likely because the + * image has been flattened) we need to free the pages + * and re-submit the original write request. + */ + if (!rbd_dev->parent_overlap) { + struct ceph_osd_client *osdc; - if (result) - goto out_err; + ceph_release_page_vector(pages, page_count); + osdc = &rbd_dev->rbd_client->client->osdc; + img_result = rbd_obj_request_submit(osdc, orig_request); + if (!img_result) + return; + } - /* Allocate the new copyup osd request for the original request */ + if (img_result) + goto out_err; - result = -ENOMEM; - rbd_assert(!orig_request->osd_req); + /* + * The original osd request is of no use to use any more. + * We need a new one that can hold the two ops in a copyup + * request. Allocate the new copyup osd request for the + * original request, and release the old one. 
+ */ + img_result = -ENOMEM; osd_req = rbd_osd_req_create_copyup(orig_request); if (!osd_req) goto out_err; + rbd_osd_req_destroy(orig_request->osd_req); orig_request->osd_req = osd_req; orig_request->copyup_pages = pages; + orig_request->copyup_page_count = page_count; /* Initialize the copyup op */ osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); - osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, + osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0, false, false); /* Then the original write request op */ + offset = orig_request->offset; + length = orig_request->length; osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, - orig_request->offset, - orig_request->length, 0, 0); - osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, - orig_request->length); + offset, length, 0, 0); + if (orig_request->type == OBJ_REQUEST_BIO) + osd_req_op_extent_osd_data_bio(osd_req, 1, + orig_request->bio_list, length); + else + osd_req_op_extent_osd_data_pages(osd_req, 1, + orig_request->pages, length, + offset & ~PAGE_MASK, false, false); rbd_osd_req_format_write(orig_request); @@ -2211,13 +2405,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) orig_request->callback = rbd_img_obj_copyup_callback; osdc = &rbd_dev->rbd_client->client->osdc; - result = rbd_obj_request_submit(osdc, orig_request); - if (!result) + img_result = rbd_obj_request_submit(osdc, orig_request); + if (!img_result) return; out_err: /* Record the error code and complete the request */ - orig_request->result = result; + orig_request->result = img_result; orig_request->xferred = 0; obj_request_done_set(orig_request); rbd_obj_request_complete(orig_request); @@ -2249,7 +2443,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) int result; rbd_assert(obj_request_img_data_test(obj_request)); - rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + rbd_assert(obj_request_type_valid(obj_request->type)); img_request = obj_request->img_request; rbd_assert(img_request != NULL); @@ -2257,15 +2451,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) rbd_assert(rbd_dev->parent != NULL); /* - * First things first. The original osd request is of no - * use to use any more, we'll need a new one that can hold - * the two ops in a copyup request. We'll get that later, - * but for now we can release the old one. - */ - rbd_osd_req_destroy(obj_request->osd_req); - obj_request->osd_req = NULL; - - /* * Determine the byte range covered by the object in the * child image to which the original request was to be sent. 
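The byte-range determination mentioned above reads a full parent object's worth of data for the copyup. The exact computation is not shown in this hunk, so the sketch below assumes the usual power-of-two object size given by obj_order and a page-granular page count:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ull << PAGE_SHIFT)

int main(void)
{
	unsigned int obj_order = 22;			/* 4 MiB objects (illustrative) */
	uint64_t obj_size = 1ull << obj_order;
	uint64_t img_offset = 9 * 1024 * 1024 + 4096;	/* write lands in object #2 */

	/* Round down to the start of the containing object... */
	uint64_t read_offset = img_offset & ~(obj_size - 1);
	/* ...and read the whole object's worth of parent data. */
	uint32_t page_count = (uint32_t)((obj_size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	printf("parent read %llu~%llu using %u pages\n",
	       (unsigned long long)read_offset,
	       (unsigned long long)obj_size, page_count);
	return 0;
}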
*/ @@ -2295,18 +2480,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) } result = -ENOMEM; - parent_request = rbd_img_request_create(rbd_dev->parent, - img_offset, length, - false, true); + parent_request = rbd_parent_request_create(obj_request, + img_offset, length); if (!parent_request) goto out_err; - rbd_obj_request_get(obj_request); - parent_request->obj_request = obj_request; result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); if (result) goto out_err; parent_request->copyup_pages = pages; + parent_request->copyup_page_count = page_count; parent_request->callback = rbd_img_obj_parent_read_full_callback; result = rbd_img_request_submit(parent_request); @@ -2314,6 +2497,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) return 0; parent_request->copyup_pages = NULL; + parent_request->copyup_page_count = 0; parent_request->obj_request = NULL; rbd_obj_request_put(obj_request); out_err: @@ -2331,6 +2515,7 @@ out_err: static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) { struct rbd_obj_request *orig_request; + struct rbd_device *rbd_dev; int result; rbd_assert(!obj_request_img_data_test(obj_request)); @@ -2353,8 +2538,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) obj_request->xferred, obj_request->length); rbd_obj_request_put(obj_request); - rbd_assert(orig_request); - rbd_assert(orig_request->img_request); + /* + * If the overlap has become 0 (most likely because the + * image has been flattened) we need to free the pages + * and re-submit the original write request. + */ + rbd_dev = orig_request->img_request->rbd_dev; + if (!rbd_dev->parent_overlap) { + struct ceph_osd_client *osdc; + + rbd_obj_request_put(orig_request); + osdc = &rbd_dev->rbd_client->client->osdc; + result = rbd_obj_request_submit(osdc, orig_request); + if (!result) + return; + } /* * Our only purpose here is to determine whether the object @@ -2512,14 +2710,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) struct rbd_obj_request *obj_request; struct rbd_device *rbd_dev; u64 obj_end; + u64 img_xferred; + int img_result; rbd_assert(img_request_child_test(img_request)); + /* First get what we need from the image request and release it */ + obj_request = img_request->obj_request; + img_xferred = img_request->xferred; + img_result = img_request->result; + rbd_img_request_put(img_request); + + /* + * If the overlap has become 0 (most likely because the + * image has been flattened) we need to re-submit the + * original request. 
+ */ rbd_assert(obj_request); rbd_assert(obj_request->img_request); + rbd_dev = obj_request->img_request->rbd_dev; + if (!rbd_dev->parent_overlap) { + struct ceph_osd_client *osdc; - obj_request->result = img_request->result; + osdc = &rbd_dev->rbd_client->client->osdc; + img_result = rbd_obj_request_submit(osdc, obj_request); + if (!img_result) + return; + } + + obj_request->result = img_result; if (obj_request->result) goto out; @@ -2532,7 +2752,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) */ rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); obj_end = obj_request->img_offset + obj_request->length; - rbd_dev = obj_request->img_request->rbd_dev; if (obj_end > rbd_dev->parent_overlap) { u64 xferred = 0; @@ -2540,43 +2759,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) xferred = rbd_dev->parent_overlap - obj_request->img_offset; - obj_request->xferred = min(img_request->xferred, xferred); + obj_request->xferred = min(img_xferred, xferred); } else { - obj_request->xferred = img_request->xferred; + obj_request->xferred = img_xferred; } out: - rbd_img_request_put(img_request); rbd_img_obj_request_read_callback(obj_request); rbd_obj_request_complete(obj_request); } static void rbd_img_parent_read(struct rbd_obj_request *obj_request) { - struct rbd_device *rbd_dev; struct rbd_img_request *img_request; int result; rbd_assert(obj_request_img_data_test(obj_request)); rbd_assert(obj_request->img_request != NULL); rbd_assert(obj_request->result == (s32) -ENOENT); - rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + rbd_assert(obj_request_type_valid(obj_request->type)); - rbd_dev = obj_request->img_request->rbd_dev; - rbd_assert(rbd_dev->parent != NULL); /* rbd_read_finish(obj_request, obj_request->length); */ - img_request = rbd_img_request_create(rbd_dev->parent, + img_request = rbd_parent_request_create(obj_request, obj_request->img_offset, - obj_request->length, - false, true); + obj_request->length); result = -ENOMEM; if (!img_request) goto out_err; - rbd_obj_request_get(obj_request); - img_request->obj_request = obj_request; - - result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, - obj_request->bio_list); + if (obj_request->type == OBJ_REQUEST_BIO) + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, + obj_request->bio_list); + else + result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES, + obj_request->pages); if (result) goto out_err; @@ -2626,6 +2841,7 @@ out: static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *rbd_dev = (struct rbd_device *)data; + int ret; if (!rbd_dev) return; @@ -2633,7 +2849,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, rbd_dev->header_name, (unsigned long long)notify_id, (unsigned int)opcode); - (void)rbd_dev_refresh(rbd_dev); + ret = rbd_dev_refresh(rbd_dev); + if (ret) + rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret); rbd_obj_notify_ack(rbd_dev, notify_id); } @@ -2642,7 +2860,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) * Request sync osd watch/unwatch. The value of "start" determines * whether a watch request is being initiated or torn down. 
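The parent-read completion earlier in this hunk clamps the transfer count so that a read extending past the parent overlap only reports the bytes the parent actually provided. The same arithmetic as a standalone C program, with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_xferred(uint64_t img_offset, uint64_t length,
			      uint64_t img_xferred, uint64_t parent_overlap)
{
	uint64_t obj_end = img_offset + length;

	if (obj_end > parent_overlap) {
		uint64_t covered = 0;

		if (img_offset < parent_overlap)
			covered = parent_overlap - img_offset;
		return img_xferred < covered ? img_xferred : covered;
	}
	return img_xferred;
}

int main(void)
{
	/* 1 MiB read at 3.5 MiB against a 4 MiB overlap: only 512 KiB count. */
	printf("%llu\n", (unsigned long long)
	       clamp_xferred(3584 * 1024, 1024 * 1024,
			     1024 * 1024, 4 * 1024 * 1024));
	return 0;
}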
*/ -static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) +static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; @@ -2676,7 +2894,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) rbd_dev->watch_request->osd_req); osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, 0, start); + rbd_dev->watch_event->cookie, 0, start ? 1 : 0); rbd_osd_req_format_write(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); @@ -2869,9 +3087,16 @@ static void rbd_request_fn(struct request_queue *q) goto end_request; /* Shouldn't happen */ } + result = -EIO; + if (offset + length > rbd_dev->mapping.size) { + rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n", + offset, length, rbd_dev->mapping.size); + goto end_request; + } + result = -ENOMEM; img_request = rbd_img_request_create(rbd_dev, offset, length, - write_request, false); + write_request); if (!img_request) goto end_request; @@ -3022,17 +3247,11 @@ out: } /* - * Read the complete header for the given rbd device. - * - * Returns a pointer to a dynamically-allocated buffer containing - * the complete and validated header. Caller can pass the address - * of a variable that will be filled in with the version of the - * header object at the time it was read. - * - * Returns a pointer-coded errno if a failure occurs. + * Read the complete header for the given rbd device. On successful + * return, the rbd_dev->header field will contain up-to-date + * information about the image. */ -static struct rbd_image_header_ondisk * -rbd_dev_v1_header_read(struct rbd_device *rbd_dev) +static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) { struct rbd_image_header_ondisk *ondisk = NULL; u32 snap_count = 0; @@ -3057,22 +3276,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev) size += names_size; ondisk = kmalloc(size, GFP_KERNEL); if (!ondisk) - return ERR_PTR(-ENOMEM); + return -ENOMEM; ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 0, size, ondisk); if (ret < 0) - goto out_err; + goto out; if ((size_t)ret < size) { ret = -ENXIO; rbd_warn(rbd_dev, "short header read (want %zd got %d)", size, ret); - goto out_err; + goto out; } if (!rbd_dev_ondisk_valid(ondisk)) { ret = -ENXIO; rbd_warn(rbd_dev, "invalid header"); - goto out_err; + goto out; } names_size = le64_to_cpu(ondisk->snap_names_len); @@ -3080,85 +3299,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev) snap_count = le32_to_cpu(ondisk->snap_count); } while (snap_count != want_count); - return ondisk; - -out_err: - kfree(ondisk); - - return ERR_PTR(ret); -} - -/* - * reload the ondisk the header - */ -static int rbd_read_header(struct rbd_device *rbd_dev, - struct rbd_image_header *header) -{ - struct rbd_image_header_ondisk *ondisk; - int ret; - - ondisk = rbd_dev_v1_header_read(rbd_dev); - if (IS_ERR(ondisk)) - return PTR_ERR(ondisk); - ret = rbd_header_from_disk(header, ondisk); + ret = rbd_header_from_disk(rbd_dev, ondisk); +out: kfree(ondisk); return ret; } -static void rbd_update_mapping_size(struct rbd_device *rbd_dev) -{ - if (rbd_dev->spec->snap_id != CEPH_NOSNAP) - return; - - if (rbd_dev->mapping.size != rbd_dev->header.image_size) { - sector_t size; - - rbd_dev->mapping.size = rbd_dev->header.image_size; - size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; - dout("setting size to %llu sectors", (unsigned long long)size); - 
set_capacity(rbd_dev->disk, size); - } -} - -/* - * only read the first part of the ondisk header, without the snaps info - */ -static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) -{ - int ret; - struct rbd_image_header h; - - ret = rbd_read_header(rbd_dev, &h); - if (ret < 0) - return ret; - - down_write(&rbd_dev->header_rwsem); - - /* Update image size, and check for resize of mapped image */ - rbd_dev->header.image_size = h.image_size; - rbd_update_mapping_size(rbd_dev); - - /* rbd_dev->header.object_prefix shouldn't change */ - kfree(rbd_dev->header.snap_sizes); - kfree(rbd_dev->header.snap_names); - /* osd requests may still refer to snapc */ - ceph_put_snap_context(rbd_dev->header.snapc); - - rbd_dev->header.image_size = h.image_size; - rbd_dev->header.snapc = h.snapc; - rbd_dev->header.snap_names = h.snap_names; - rbd_dev->header.snap_sizes = h.snap_sizes; - /* Free the extra copy of the object prefix */ - if (strcmp(rbd_dev->header.object_prefix, h.object_prefix)) - rbd_warn(rbd_dev, "object prefix changed (ignoring)"); - kfree(h.object_prefix); - - up_write(&rbd_dev->header_rwsem); - - return ret; -} - /* * Clear the rbd device's EXISTS flag if the snapshot it's mapped to * has disappeared from the (just updated) snapshot context. @@ -3180,26 +3327,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev) static int rbd_dev_refresh(struct rbd_device *rbd_dev) { - u64 image_size; + u64 mapping_size; int ret; rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); - image_size = rbd_dev->header.image_size; + mapping_size = rbd_dev->mapping.size; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_refresh(rbd_dev); + ret = rbd_dev_v1_header_info(rbd_dev); else - ret = rbd_dev_v2_refresh(rbd_dev); + ret = rbd_dev_v2_header_info(rbd_dev); /* If it's a mapped snapshot, validate its EXISTS flag */ rbd_exists_validate(rbd_dev); mutex_unlock(&ctl_mutex); - if (ret) - rbd_warn(rbd_dev, "got notification but failed to " - " update snaps: %d\n", ret); - if (image_size != rbd_dev->header.image_size) + if (mapping_size != rbd_dev->mapping.size) { + sector_t size; + + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; + dout("setting size to %llu sectors", (unsigned long long)size); + set_capacity(rbd_dev->disk, size); revalidate_disk(rbd_dev->disk); + } return ret; } @@ -3403,6 +3553,8 @@ static ssize_t rbd_image_refresh(struct device *dev, int ret; ret = rbd_dev_refresh(rbd_dev); + if (ret) + rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret); return ret < 0 ? ret : size; } @@ -3501,6 +3653,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, spin_lock_init(&rbd_dev->lock); rbd_dev->flags = 0; + atomic_set(&rbd_dev->parent_ref, 0); INIT_LIST_HEAD(&rbd_dev->node); init_rwsem(&rbd_dev->header_rwsem); @@ -3650,6 +3803,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) __le64 snapid; void *p; void *end; + u64 pool_id; char *image_id; u64 overlap; int ret; @@ -3680,18 +3834,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) p = reply_buf; end = reply_buf + ret; ret = -ERANGE; - ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); - if (parent_spec->pool_id == CEPH_NOPOOL) + ceph_decode_64_safe(&p, end, pool_id, out_err); + if (pool_id == CEPH_NOPOOL) { + /* + * Either the parent never existed, or we have + * record of it but the image got flattened so it no + * longer has a parent. 
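The do/while loop in rbd_dev_v1_header_info() above sizes the header buffer from the snapshot count seen on the previous pass and re-reads until the on-disk count matches, since a snapshot can be created between reads. A userspace model of that retry; the 16-byte per-snapshot entry size is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Trimmed-down stand-in for the format 1 on-disk header. */
struct ondisk { uint32_t snap_count; };

/* Stand-in for rbd_obj_read_sync(): the image has three snapshots. */
static void read_header(struct ondisk *buf, size_t size)
{
	memset(buf, 0, size);
	buf->snap_count = 3;
}

int main(void)
{
	struct ondisk *ondisk = NULL;
	uint32_t snap_count = 0;
	uint32_t want_count;

	do {
		/* Size the buffer for the snapshot count seen last time. */
		size_t size = sizeof(*ondisk) + snap_count * 16;

		free(ondisk);
		ondisk = malloc(size);
		if (!ondisk)
			return 1;
		read_header(ondisk, size);

		want_count = snap_count;		/* what we sized for */
		snap_count = ondisk->snap_count;	/* what is on disk now */
	} while (snap_count != want_count);

	printf("consistent header with %u snapshots\n", snap_count);
	free(ondisk);
	return 0;
}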
When the parent of a + * layered image disappears we immediately set the + * overlap to 0. The effect of this is that all new + * requests will be treated as if the image had no + * parent. + */ + if (rbd_dev->parent_overlap) { + rbd_dev->parent_overlap = 0; + smp_mb(); + rbd_dev_parent_put(rbd_dev); + pr_info("%s: clone image has been flattened\n", + rbd_dev->disk->disk_name); + } + goto out; /* No parent? No problem. */ + } /* The ceph file layout needs to fit pool id in 32 bits */ ret = -EIO; - if (parent_spec->pool_id > (u64)U32_MAX) { + if (pool_id > (u64)U32_MAX) { rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", - (unsigned long long)parent_spec->pool_id, U32_MAX); + (unsigned long long)pool_id, U32_MAX); goto out_err; } + parent_spec->pool_id = pool_id; image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); if (IS_ERR(image_id)) { @@ -3702,9 +3875,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); ceph_decode_64_safe(&p, end, overlap, out_err); - rbd_dev->parent_overlap = overlap; - rbd_dev->parent_spec = parent_spec; - parent_spec = NULL; /* rbd_dev now owns this */ + if (overlap) { + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = parent_spec; + parent_spec = NULL; /* rbd_dev now owns this */ + rbd_dev->parent_overlap = overlap; + } else { + rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n"); + } out: ret = 0; out_err: @@ -4002,6 +4180,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) for (i = 0; i < snap_count; i++) snapc->snaps[i] = ceph_decode_64(&p); + ceph_put_snap_context(rbd_dev->header.snapc); rbd_dev->header.snapc = snapc; dout(" snap context seq = %llu, snap_count = %u\n", @@ -4053,21 +4232,56 @@ out: return snap_name; } -static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) +static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) { + bool first_time = rbd_dev->header.object_prefix == NULL; int ret; down_write(&rbd_dev->header_rwsem); + if (first_time) { + ret = rbd_dev_v2_header_onetime(rbd_dev); + if (ret) + goto out; + } + + /* + * If the image supports layering, get the parent info. We + * need to probe the first time regardless. Thereafter we + * only need to if there's a parent, to see if it has + * disappeared due to the mapped image getting flattened. + */ + if (rbd_dev->header.features & RBD_FEATURE_LAYERING && + (first_time || rbd_dev->parent_spec)) { + bool warn; + + ret = rbd_dev_v2_parent_info(rbd_dev); + if (ret) + goto out; + + /* + * Print a warning if this is the initial probe and + * the image has a parent. Don't print it if the + * image now being probed is itself a parent. We + * can tell at this point because we won't know its + * pool name yet (just its pool id). 
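The parent-info decoding above pulls the pool id, snapshot id and overlap out of the class-method reply with bounds-checked little-endian reads. Below is a minimal standalone decoder in the same spirit as ceph_decode_64_safe(); the real reply also carries the parent image id as an encoded string between the pool id and the snapshot id, which this sketch elides:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bounds-checked little-endian u64 decode. */
static bool decode_le64(const uint8_t **p, const uint8_t *end, uint64_t *out)
{
	uint64_t v = 0;
	int i;

	if (end - *p < 8)
		return false;		/* truncated reply (-ERANGE above) */
	for (i = 0; i < 8; i++)
		v |= (uint64_t)(*p)[i] << (8 * i);
	*p += 8;
	*out = v;
	return true;
}

int main(void)
{
	/* pool id 2, snap id 5, overlap 4 MiB (image id string omitted). */
	uint8_t reply[24] = { 0 };
	const uint8_t *p = reply, *end = reply + sizeof(reply);
	uint64_t pool_id, snap_id, overlap;

	reply[0] = 2;
	reply[8] = 5;
	reply[18] = 0x40;	/* byte 2 of the third u64: 0x400000 = 4 MiB */

	if (decode_le64(&p, end, &pool_id) &&
	    decode_le64(&p, end, &snap_id) &&
	    decode_le64(&p, end, &overlap))
		printf("pool %llu snap %llu overlap %llu\n",
		       (unsigned long long)pool_id,
		       (unsigned long long)snap_id,
		       (unsigned long long)overlap);
	return 0;
}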
+ */ + warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name; + if (first_time && warn) + rbd_warn(rbd_dev, "WARNING: kernel layering " + "is EXPERIMENTAL!"); + } + ret = rbd_dev_v2_image_size(rbd_dev); if (ret) goto out; - rbd_update_mapping_size(rbd_dev); + + if (rbd_dev->spec->snap_id == CEPH_NOSNAP) + if (rbd_dev->mapping.size != rbd_dev->header.image_size) + rbd_dev->mapping.size = rbd_dev->header.image_size; ret = rbd_dev_v2_snap_context(rbd_dev); dout("rbd_dev_v2_snap_context returned %d\n", ret); - if (ret) - goto out; out: up_write(&rbd_dev->header_rwsem); @@ -4484,16 +4698,18 @@ out: return ret; } -/* Undo whatever state changes are made by v1 or v2 image probe */ - +/* + * Undo whatever state changes are made by v1 or v2 header info + * call. + */ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) { struct rbd_image_header *header; - rbd_dev_remove_parent(rbd_dev); - rbd_spec_put(rbd_dev->parent_spec); - rbd_dev->parent_spec = NULL; - rbd_dev->parent_overlap = 0; + /* Drop parent reference unless it's already been done (or none) */ + + if (rbd_dev->parent_overlap) + rbd_dev_parent_put(rbd_dev); /* Free dynamic fields from the header, then zero it out */ @@ -4505,72 +4721,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) memset(header, 0, sizeof (*header)); } -static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) +static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) { int ret; - /* Populate rbd image metadata */ - - ret = rbd_read_header(rbd_dev, &rbd_dev->header); - if (ret < 0) - goto out_err; - - /* Version 1 images have no parent (no layering) */ - - rbd_dev->parent_spec = NULL; - rbd_dev->parent_overlap = 0; - - dout("discovered version 1 image, header name is %s\n", - rbd_dev->header_name); - - return 0; - -out_err: - kfree(rbd_dev->header_name); - rbd_dev->header_name = NULL; - kfree(rbd_dev->spec->image_id); - rbd_dev->spec->image_id = NULL; - - return ret; -} - -static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) -{ - int ret; - - ret = rbd_dev_v2_image_size(rbd_dev); - if (ret) - goto out_err; - - /* Get the object prefix (a.k.a. block_name) for the image */ - ret = rbd_dev_v2_object_prefix(rbd_dev); if (ret) goto out_err; - /* Get the and check features for the image */ - + /* + * Get the and check features for the image. Currently the + * features are assumed to never change. + */ ret = rbd_dev_v2_features(rbd_dev); if (ret) goto out_err; - /* If the image supports layering, get the parent info */ - - if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { - ret = rbd_dev_v2_parent_info(rbd_dev); - if (ret) - goto out_err; - - /* - * Don't print a warning for parent images. We can - * tell this point because we won't know its pool - * name yet (just its pool id). 
- */ - if (rbd_dev->spec->pool_name) - rbd_warn(rbd_dev, "WARNING: kernel layering " - "is EXPERIMENTAL!"); - } - /* If the image supports fancy striping, get its parameters */ if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { @@ -4578,28 +4744,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) if (ret < 0) goto out_err; } - - /* crypto and compression type aren't (yet) supported for v2 images */ - - rbd_dev->header.crypt_type = 0; - rbd_dev->header.comp_type = 0; - - /* Get the snapshot context, plus the header version */ - - ret = rbd_dev_v2_snap_context(rbd_dev); - if (ret) - goto out_err; - - dout("discovered version 2 image, header name is %s\n", - rbd_dev->header_name); + /* No support for crypto and compression type format 2 images */ return 0; out_err: - rbd_dev->parent_overlap = 0; - rbd_spec_put(rbd_dev->parent_spec); - rbd_dev->parent_spec = NULL; - kfree(rbd_dev->header_name); - rbd_dev->header_name = NULL; + rbd_dev->header.features = 0; kfree(rbd_dev->header.object_prefix); rbd_dev->header.object_prefix = NULL; @@ -4628,15 +4777,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) if (!parent) goto out_err; - ret = rbd_dev_image_probe(parent); + ret = rbd_dev_image_probe(parent, false); if (ret < 0) goto out_err; rbd_dev->parent = parent; + atomic_set(&rbd_dev->parent_ref, 1); return 0; out_err: if (parent) { - rbd_spec_put(rbd_dev->parent_spec); + rbd_dev_unparent(rbd_dev); kfree(rbd_dev->header_name); rbd_dev_destroy(parent); } else { @@ -4651,10 +4801,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) { int ret; - ret = rbd_dev_mapping_set(rbd_dev); - if (ret) - return ret; - /* generate unique id: find highest unique id, add one */ rbd_dev_id_get(rbd_dev); @@ -4676,13 +4822,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) if (ret) goto err_out_blkdev; - ret = rbd_bus_add_dev(rbd_dev); + ret = rbd_dev_mapping_set(rbd_dev); if (ret) goto err_out_disk; + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); + + ret = rbd_bus_add_dev(rbd_dev); + if (ret) + goto err_out_mapping; /* Everything's ready. Announce the disk to the world. */ - set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); add_disk(rbd_dev->disk); @@ -4691,6 +4841,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) return ret; +err_out_mapping: + rbd_dev_mapping_clear(rbd_dev); err_out_disk: rbd_free_disk(rbd_dev); err_out_blkdev: @@ -4731,12 +4883,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static void rbd_dev_image_release(struct rbd_device *rbd_dev) { - int ret; - rbd_dev_unprobe(rbd_dev); - ret = rbd_dev_header_watch_sync(rbd_dev, 0); - if (ret) - rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; rbd_dev->image_format = 0; @@ -4748,18 +4895,20 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) /* * Probe for the existence of the header object for the given rbd - * device. For format 2 images this includes determining the image - * id. + * device. If this image is the one being mapped (i.e., not a + * parent), initiate a watch on its header object before using that + * object to get detailed information about the rbd image. */ -static int rbd_dev_image_probe(struct rbd_device *rbd_dev) +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) { int ret; int tmp; /* - * Get the id from the image id object. 
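Because rbd_dev_probe_parent() above probes the parent with rbd_dev_image_probe(parent, false), and that probe may in turn find its own parent, mapping a clone effectively walks the whole ancestry. A toy model of that recursion; the structures are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct image {
	const char *name;
	struct image *parent_spec;	/* NULL when not layered */
};

static int image_probe(struct image *img, bool mapping)
{
	printf("probe %s (mapping=%d)\n", img->name, mapping);
	if (img->parent_spec)
		return image_probe(img->parent_spec, false);
	return 0;
}

int main(void)
{
	struct image base = { "base", NULL };
	struct image clone = { "clone", &base };
	struct image grandchild = { "grandchild", &clone };

	/* Only the mapped image at the bottom of the chain gets a watch. */
	return image_probe(&grandchild, true);
}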
If it's not a - * format 2 image, we'll get ENOENT back, and we'll assume - * it's a format 1 image. + * Get the id from the image id object. Unless there's an + * error, rbd_dev->spec->image_id will be filled in with + * a dynamically-allocated string, and rbd_dev->image_format + * will be set to either 1 or 2. */ ret = rbd_dev_image_id(rbd_dev); if (ret) @@ -4771,14 +4920,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) if (ret) goto err_out_format; - ret = rbd_dev_header_watch_sync(rbd_dev, 1); - if (ret) - goto out_header_name; + if (mapping) { + ret = rbd_dev_header_watch_sync(rbd_dev, true); + if (ret) + goto out_header_name; + } if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_probe(rbd_dev); + ret = rbd_dev_v1_header_info(rbd_dev); else - ret = rbd_dev_v2_probe(rbd_dev); + ret = rbd_dev_v2_header_info(rbd_dev); if (ret) goto err_out_watch; @@ -4787,15 +4938,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) goto err_out_probe; ret = rbd_dev_probe_parent(rbd_dev); - if (!ret) - return 0; + if (ret) + goto err_out_probe; + + dout("discovered format %u image, header name is %s\n", + rbd_dev->image_format, rbd_dev->header_name); + return 0; err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: - tmp = rbd_dev_header_watch_sync(rbd_dev, 0); - if (tmp) - rbd_warn(rbd_dev, "unable to tear down watch request\n"); + if (mapping) { + tmp = rbd_dev_header_watch_sync(rbd_dev, false); + if (tmp) + rbd_warn(rbd_dev, "unable to tear down " + "watch request (%d)\n", tmp); + } out_header_name: kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; @@ -4819,6 +4977,7 @@ static ssize_t rbd_add(struct bus_type *bus, struct rbd_spec *spec = NULL; struct rbd_client *rbdc; struct ceph_osd_client *osdc; + bool read_only; int rc = -ENOMEM; if (!try_module_get(THIS_MODULE)) @@ -4828,13 +4987,15 @@ static ssize_t rbd_add(struct bus_type *bus, rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); if (rc < 0) goto err_out_module; + read_only = rbd_opts->read_only; + kfree(rbd_opts); + rbd_opts = NULL; /* done with this */ rbdc = rbd_get_client(ceph_opts); if (IS_ERR(rbdc)) { rc = PTR_ERR(rbdc); goto err_out_args; } - ceph_opts = NULL; /* rbd_dev client now owns this */ /* pick the pool */ osdc = &rbdc->client->osdc; @@ -4858,27 +5019,29 @@ static ssize_t rbd_add(struct bus_type *bus, rbdc = NULL; /* rbd_dev now owns this */ spec = NULL; /* rbd_dev now owns this */ - rbd_dev->mapping.read_only = rbd_opts->read_only; - kfree(rbd_opts); - rbd_opts = NULL; /* done with this */ - - rc = rbd_dev_image_probe(rbd_dev); + rc = rbd_dev_image_probe(rbd_dev, true); if (rc < 0) goto err_out_rbd_dev; + /* If we are mapping a snapshot it must be marked read-only */ + + if (rbd_dev->spec->snap_id != CEPH_NOSNAP) + read_only = true; + rbd_dev->mapping.read_only = read_only; + rc = rbd_dev_device_setup(rbd_dev); - if (!rc) - return count; + if (rc) { + rbd_dev_image_release(rbd_dev); + goto err_out_module; + } + + return count; - rbd_dev_image_release(rbd_dev); err_out_rbd_dev: rbd_dev_destroy(rbd_dev); err_out_client: rbd_put_client(rbdc); err_out_args: - if (ceph_opts) - ceph_destroy_options(ceph_opts); - kfree(rbd_opts); rbd_spec_put(spec); err_out_module: module_put(THIS_MODULE); @@ -4911,7 +5074,7 @@ static void rbd_dev_device_release(struct device *dev) rbd_free_disk(rbd_dev); clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); - rbd_dev_clear_mapping(rbd_dev); + rbd_dev_mapping_clear(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); rbd_dev->major = 0; 
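The probe flow above only registers the header watch when the image is the one being mapped, and the error path mirrors that by tearing the watch down only in the same case. A compact userspace model of that conditional setup/teardown; the helpers and error values are stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int header_watch_sync(bool start)
{
	printf("%s watch\n", start ? "start" : "stop");
	return 0;
}

static int header_info(bool fail)
{
	return fail ? -5 : 0;	/* pretend -EIO when asked to fail */
}

static int image_probe(bool mapping, bool fail)
{
	int ret;

	if (mapping) {
		ret = header_watch_sync(true);
		if (ret)
			return ret;
	}

	ret = header_info(fail);
	if (ret)
		goto err_out_watch;

	return 0;

err_out_watch:
	if (mapping)
		header_watch_sync(false);	/* undo only what was done */
	return ret;
}

int main(void)
{
	image_probe(true, false);	/* mapped image, probe succeeds */
	image_probe(true, true);	/* mapped image, probe fails: watch undone */
	image_probe(false, false);	/* parent image: no watch either way */
	return 0;
}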
rbd_dev_id_put(rbd_dev); @@ -4978,10 +5141,13 @@ static ssize_t rbd_remove(struct bus_type *bus, spin_unlock_irq(&rbd_dev->lock); if (ret < 0) goto done; - ret = count; rbd_bus_del_dev(rbd_dev); + ret = rbd_dev_header_watch_sync(rbd_dev, false); + if (ret) + rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); rbd_dev_image_release(rbd_dev); module_put(THIS_MODULE); + ret = count; done: mutex_unlock(&ctl_mutex); diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index f8ef15f37c5e..3fd130fdfbc1 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1160,8 +1160,7 @@ static int ace_probe(struct platform_device *dev) dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); /* device id and bus width */ - of_property_read_u32(dev->dev.of_node, "port-number", &id); - if (id < 0) + if (of_property_read_u32(dev->dev.of_node, "port-number", &id)) id = 0; if (of_find_property(dev->dev.of_node, "8-bit", NULL)) bus_width = ACE_BUS_WIDTH_8; diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index fdfd61a2d523..11a6104a1e4f 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -201,7 +201,7 @@ config BT_MRVL The core driver to support Marvell Bluetooth devices. This driver is required if you want to support - Marvell Bluetooth devices, such as 8688/8787/8797. + Marvell Bluetooth devices, such as 8688/8787/8797/8897. Say Y here to compile Marvell Bluetooth driver into the kernel or say M to compile it as module. @@ -214,7 +214,7 @@ config BT_MRVL_SDIO The driver for Marvell Bluetooth chipsets with SDIO interface. This driver is required if you want to use Marvell Bluetooth - devices with SDIO interface. Currently SD8688/SD8787/SD8797 + devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897 chipsets are supported. 
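The xsysace fix above works because of_property_read_u32() reports a missing property through its return value and leaves the output untouched, so testing the output for a negative value does not detect the missing property. A small model of the corrected pattern, with a stand-in for the OF helper:

#include <stdio.h>

/* Stand-in for of_property_read_u32(): nonzero means "property missing". */
static int fake_read_u32(const char *prop, unsigned int *out)
{
	(void)prop;
	(void)out;		/* output left untouched, as the real helper does */
	return -22;		/* pretend the property is absent (-EINVAL) */
}

int main(void)
{
	unsigned int id;

	if (fake_read_u32("port-number", &id))
		id = 0;		/* property missing: fall back to port 0 */
	printf("port-number = %u\n", id);
	return 0;
}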
Say Y here to compile support for Marvell BT-over-SDIO driver diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index c63488c54f4a..13693b7a0d5c 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -82,6 +82,23 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = { .io_port_2 = 0x7a, }; +static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = { + .cfg = 0x00, + .host_int_mask = 0x02, + .host_intstatus = 0x03, + .card_status = 0x50, + .sq_read_base_addr_a0 = 0x60, + .sq_read_base_addr_a1 = 0x61, + .card_revision = 0xbc, + .card_fw_status0 = 0xc0, + .card_fw_status1 = 0xc1, + .card_rx_len = 0xc2, + .card_rx_unit = 0xc3, + .io_port_0 = 0xd8, + .io_port_1 = 0xd9, + .io_port_2 = 0xda, +}; + static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { .helper = "mrvl/sd8688_helper.bin", .firmware = "mrvl/sd8688.bin", @@ -103,6 +120,13 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { .sd_blksz_fw_dl = 256, }; +static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { + .helper = NULL, + .firmware = "mrvl/sd8897_uapsta.bin", + .reg = &btmrvl_reg_88xx, + .sd_blksz_fw_dl = 256, +}; + static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8688 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), @@ -116,6 +140,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, + /* Marvell SD8897 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E), + .driver_data = (unsigned long) &btmrvl_sdio_sd8897 }, { } /* Terminating entry */ }; @@ -1194,3 +1221,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin"); MODULE_FIRMWARE("mrvl/sd8688.bin"); MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 4ca35e8a5d8c..19a12ac64a9e 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -167,11 +167,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) clk_prepare_enable(mxc_rng->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - err = -ENOENT; - goto err_region; - } - mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); @@ -189,7 +184,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) return 0; err_ioremap: -err_region: clk_disable_unprepare(mxc_rng->clk); out: diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 749dc16ca2cc..d2903e772270 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -119,11 +119,6 @@ static int omap_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!priv->mem_res) { - ret = -ENOENT; - goto err_ioremap; - } - priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index cdd4c09fda96..a22a7a502740 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -95,9 +95,9 @@ struct si_sm_data { enum bt_states state; unsigned char seq; /* BT sequence number */ struct si_sm_io *io; - unsigned char 
write_data[IPMI_MAX_MSG_LENGTH]; + unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int write_count; - unsigned char read_data[IPMI_MAX_MSG_LENGTH]; + unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int read_count; int truncated; long timeout; /* microseconds countdown */ diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 9eb360ff8cab..d5a5f020810a 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, return ipmi_ioctl(filep, cmd, arg); } } + +static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int ret; + + mutex_lock(&ipmi_mutex); + ret = compat_ipmi_ioctl(filep, cmd, arg); + mutex_unlock(&ipmi_mutex); + + return ret; +} #endif static const struct file_operations ipmi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = ipmi_unlocked_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = compat_ipmi_ioctl, + .compat_ioctl = unlocked_compat_ipmi_ioctl, #endif .open = ipmi_open, .release = ipmi_release, diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 4d439d2fcfd6..4445fa164a2d 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; - entry->name = kmalloc(strlen(name)+1, GFP_KERNEL); + entry->name = kstrdup(name, GFP_KERNEL); if (!entry->name) { kfree(entry); return -ENOMEM; } - strcpy(entry->name, name); file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); if (!file) { diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 313538abe63c..af4b23ffc5a6 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - dev_warn(smi_info->dev, "Could not enable interrupts" - ", failed get, using polled mode.\n"); + dev_warn(smi_info->dev, + "Couldn't get irq info: %x.\n", msg[2]); + dev_warn(smi_info->dev, + "Maybe ok, but ipmi might run very slowly.\n"); smi_info->si_state = SI_NORMAL; } else { msg[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. 
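The compat-ioctl change above wraps the existing handler so the 32-bit path takes ipmi_mutex just like the native unlocked_ioctl path. The serialization pattern, modeled in userspace with a pthread mutex; the handler and command values are stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ipmi_mutex = PTHREAD_MUTEX_INITIALIZER;

static long do_ioctl(unsigned int cmd)
{
	printf("handling cmd %#x\n", cmd);
	return 0;
}

/* Native path: already serialized by the mutex. */
static long unlocked_ioctl(unsigned int cmd)
{
	long ret;

	pthread_mutex_lock(&ipmi_mutex);
	ret = do_ioctl(cmd);
	pthread_mutex_unlock(&ipmi_mutex);
	return ret;
}

/* Compat path: previously reached the handler without the lock. */
static long unlocked_compat_ioctl(unsigned int cmd)
{
	long ret;

	pthread_mutex_lock(&ipmi_mutex);
	ret = do_ioctl(cmd);
	pthread_mutex_unlock(&ipmi_mutex);
	return ret;
}

int main(void)
{
	unlocked_ioctl(0x1);
	unlocked_compat_ioctl(0x2);
	return 0;
}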
*/ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) - dev_warn(smi_info->dev, "Could not enable interrupts" - ", failed set, using polled mode.\n"); - else + if (msg[2] != 0) { + dev_warn(smi_info->dev, + "Couldn't set irq info: %x.\n", msg[2]); + dev_warn(smi_info->dev, + "Maybe ok, but ipmi might run very slowly.\n"); + } else smi_info->interrupt_disabled = 0; smi_info->si_state = SI_NORMAL; break; diff --git a/drivers/char/lp.c b/drivers/char/lp.c index dafd9ac6428f..0913d79424d3 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -622,9 +622,12 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, return -EFAULT; break; case LPGETSTATUS: + if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) + return -EINTR; lp_claim_parport_or_block (&lp_table[minor]); status = r_str(minor); lp_release_parport (&lp_table[minor]); + mutex_unlock(&lp_table[minor].port_mutex); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; diff --git a/drivers/char/random.c b/drivers/char/random.c index cd9a6211dcad..35487e8ded59 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -865,16 +865,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, if (r->entropy_count / 8 < min + reserved) { nbytes = 0; } else { + int entropy_count, orig; +retry: + entropy_count = orig = ACCESS_ONCE(r->entropy_count); /* If limited, never pull more than available */ - if (r->limit && nbytes + reserved >= r->entropy_count / 8) - nbytes = r->entropy_count/8 - reserved; - - if (r->entropy_count / 8 >= nbytes + reserved) - r->entropy_count -= nbytes*8; - else - r->entropy_count = reserved; + if (r->limit && nbytes + reserved >= entropy_count / 8) + nbytes = entropy_count/8 - reserved; + + if (entropy_count / 8 >= nbytes + reserved) { + entropy_count -= nbytes*8; + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + goto retry; + } else { + entropy_count = reserved; + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + goto retry; + } - if (r->entropy_count < random_write_wakeup_thresh) + if (entropy_count < random_write_wakeup_thresh) wakeup_write = 1; } @@ -957,10 +965,23 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, { ssize_t ret = 0, i; __u8 tmp[EXTRACT_SIZE]; + unsigned long flags; /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ - if (fips_enabled && !r->last_data_init) - nbytes += EXTRACT_SIZE; + if (fips_enabled) { + spin_lock_irqsave(&r->lock, flags); + if (!r->last_data_init) { + r->last_data_init = true; + spin_unlock_irqrestore(&r->lock, flags); + trace_extract_entropy(r->name, EXTRACT_SIZE, + r->entropy_count, _RET_IP_); + xfer_secondary_pool(r, EXTRACT_SIZE); + extract_buf(r, tmp); + spin_lock_irqsave(&r->lock, flags); + memcpy(r->last_data, tmp, EXTRACT_SIZE); + } + spin_unlock_irqrestore(&r->lock, flags); + } trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); xfer_secondary_pool(r, nbytes); @@ -970,19 +991,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, extract_buf(r, tmp); if (fips_enabled) { - unsigned long flags; - - - /* prime last_data value if need be, per fips 140-2 */ - if (!r->last_data_init) { - spin_lock_irqsave(&r->lock, flags); - memcpy(r->last_data, tmp, EXTRACT_SIZE); - r->last_data_init = true; - nbytes -= EXTRACT_SIZE; - spin_unlock_irqrestore(&r->lock, flags); - extract_buf(r, tmp); - } - spin_lock_irqsave(&r->lock, flags); if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) panic("Hardware RNG duplicated 
output!\n"); diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index 4945bd3d18d0..d5d2e4a985aa 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -179,7 +179,6 @@ static int __init ttyprintk_init(void) { int ret = -ENOMEM; - tpk_port.port.ops = &null_ops; mutex_init(&tpk_port.port_write_mutex); ttyprintk_driver = tty_alloc_driver(1, @@ -190,6 +189,7 @@ static int __init ttyprintk_init(void) return PTR_ERR(ttyprintk_driver); tty_port_init(&tpk_port.port); + tpk_port.port.ops = &null_ops; ttyprintk_driver->driver_name = "ttyprintk"; ttyprintk_driver->name = "ttyprintk"; diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 892728412e9d..24f553673b72 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -932,7 +932,7 @@ static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw, unsigned char reg; unsigned char rdiv; - if (hwdata->num > 5) + if (hwdata->num <= 5) reg = si5351_msynth_params_address(hwdata->num) + 2; else reg = SI5351_CLK6_7_OUTPUT_DIVIDER; @@ -1477,6 +1477,16 @@ static int si5351_i2c_probe(struct i2c_client *client, return -EINVAL; } drvdata->onecell.clks[n] = clk; + + /* set initial clkout rate */ + if (pdata->clkout[n].rate != 0) { + int ret; + ret = clk_set_rate(clk, pdata->clkout[n].rate); + if (ret != 0) { + dev_err(&client->dev, "Cannot set rate : %d\n", + ret); + } + } } ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get, diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c index debf688afa8e..553ac35bcc91 100644 --- a/drivers/clk/clk-vt8500.c +++ b/drivers/clk/clk-vt8500.c @@ -183,7 +183,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate, writel(divisor, cdev->div_reg); vt8500_pmc_wait_busy(); - spin_lock_irqsave(cdev->lock, flags); + spin_unlock_irqrestore(cdev->lock, flags); return 0; } diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c index d0e5eed146de..4faf0afc44cd 100644 --- a/drivers/clk/mxs/clk-imx28.c +++ b/drivers/clk/mxs/clk-imx28.c @@ -10,6 +10,7 @@ */ #include <linux/clk.h> +#include <linux/clk/mxs.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/init.h> diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index d0940e69d034..3c1f88868f29 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -791,7 +791,8 @@ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = { GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), - GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), + GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, + CLK_IGNORE_UNUSED, 0), GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0), GATE(smmu_rotator, "smmu_rotator", "aclk200", E4210_GATE_IP_IMAGE, 4, 0, 0), @@ -819,7 +820,8 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0), GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), - GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, 0, 0), + GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, + CLK_IGNORE_UNUSED, 0), GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0), GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0", SRC_MASK_LCD0, 4, 
CLK_SET_RATE_PARENT, 0), diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 8292a00c3de9..075db0c99edb 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -872,6 +872,14 @@ static void __init tegra20_periph_clk_init(void) struct clk *clk; int i; + /* ac97 */ + clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0", + TEGRA_PERIPH_ON_APB, + clk_base, 0, 3, &periph_l_regs, + periph_clk_enb_refcnt); + clk_register_clkdev(clk, NULL, "tegra20-ac97"); + clks[ac97] = clk; + /* apbdma */ clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base, 0, 34, &periph_h_regs, @@ -1234,9 +1242,6 @@ static __initdata struct tegra_clk_init_table init_table[] = { {uartc, pll_p, 0, 0}, {uartd, pll_p, 0, 0}, {uarte, pll_p, 0, 0}, - {usbd, clk_max, 12000000, 0}, - {usb2, clk_max, 12000000, 0}, - {usb3, clk_max, 12000000, 0}, {pll_a, clk_max, 56448000, 1}, {pll_a_out0, clk_max, 11289600, 1}, {cdev1, clk_max, 0, 1}, diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c index bc7e9bde792b..e364c9d4aa60 100644 --- a/drivers/clk/ux500/clk-sysctrl.c +++ b/drivers/clk/ux500/clk-sysctrl.c @@ -145,7 +145,13 @@ static struct clk *clk_reg_sysctrl(struct device *dev, return ERR_PTR(-ENOMEM); } - for (i = 0; i < num_parents; i++) { + /* set main clock registers */ + clk->reg_sel[0] = reg_sel[0]; + clk->reg_bits[0] = reg_bits[0]; + clk->reg_mask[0] = reg_mask[0]; + + /* handle clocks with more than one parent */ + for (i = 1; i < num_parents; i++) { clk->reg_sel[i] = reg_sel[i]; clk->reg_bits[i] = reg_bits[i]; clk->reg_mask[i] = reg_mask[i]; diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c index 0b4f35a5ffc2..80069c370a47 100644 --- a/drivers/clk/ux500/u8500_clk.c +++ b/drivers/clk/ux500/u8500_clk.c @@ -325,7 +325,7 @@ void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base, BIT(0), 0); clk_register_clkdev(clk, "fsmc", NULL); - clk_register_clkdev(clk, NULL, "smsc911x"); + clk_register_clkdev(clk, NULL, "smsc911x.0"); clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base, BIT(1), 0); diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c index 5cf4f4686406..4f45eee9e33b 100644 --- a/drivers/clk/x86/clk-lpt.c +++ b/drivers/clk/x86/clk-lpt.c @@ -15,22 +15,29 @@ #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/module.h> +#include <linux/platform_data/clk-lpss.h> #include <linux/platform_device.h> #define PRV_CLOCK_PARAMS 0x800 static int lpt_clk_probe(struct platform_device *pdev) { + struct lpss_clk_data *drvdata; struct clk *clk; + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + /* LPSS free running clock */ - clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT, - 100000000); + drvdata->name = "lpss_clk"; + clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL, + CLK_IS_ROOT, 100000000); if (IS_ERR(clk)) return PTR_ERR(clk); - /* Shared DMA clock */ - clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto"); + drvdata->clk = clk; + platform_set_drvdata(pdev, drvdata); return 0; } diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index a1488f58f6ca..534fcb825153 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS choice prompt "Default CPUFreq governor" - default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 + 
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ default CPU_FREQ_DEFAULT_GOV_PERFORMANCE help This option sets which CPUFreq governor shall be loaded at diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index f3af18b9acc5..6e57543fe0b9 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -3,16 +3,17 @@ # config ARM_BIG_LITTLE_CPUFREQ - tristate - depends on ARM_CPU_TOPOLOGY + tristate "Generic ARM big LITTLE CPUfreq driver" + depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK + help + This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. config ARM_DT_BL_CPUFREQ - tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" - select ARM_BIG_LITTLE_CPUFREQ - depends on OF && HAVE_CLK + tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && OF help - This enables the Generic CPUfreq driver for ARM big.LITTLE platform. - This gets frequency tables from DT. + This enables probing via DT for Generic CPUfreq driver for ARM + big.LITTLE platform. This gets frequency tables from DT. config ARM_EXYNOS_CPUFREQ bool "SAMSUNG EXYNOS SoCs" diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 2b8a8c374548..6bd63d63d356 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -272,7 +272,7 @@ config X86_LONGHAUL config X86_E_POWERSAVER tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" select CPU_FREQ_TABLE - depends on X86_32 + depends on X86_32 && ACPI_PROCESSOR help This adds the CPUFreq driver for VIA C7 processors. However, this driver does not have any safeguards to prevent operating the CPU out of spec diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 11b8b4b54ceb..edc089e9d0c4 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask) switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; - cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; + cmd.addr.msr.reg = MSR_IA32_PERF_CTL; break; case SYSTEM_AMD_MSR_CAPABLE: cmd.type = SYSTEM_AMD_MSR_CAPABLE; - cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; + cmd.addr.msr.reg = MSR_AMD_PERF_CTL; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index dbdf677d2f36..5d7f53fcd6f5 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS]; static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; -static int cpu_to_cluster(int cpu) -{ - return topology_physical_package_id(cpu); -} - static unsigned int bL_cpufreq_get(unsigned int cpu) { u32 cur_cluster = cpu_to_cluster(cpu); @@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); - dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); + dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); return 0; } diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h index 70f18fc12d4a..79b2ce17884d 100644 --- a/drivers/cpufreq/arm_big_little.h +++ b/drivers/cpufreq/arm_big_little.h @@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops { int (*init_opp_table)(struct 
device *cpu_dev); }; +static inline int cpu_to_cluster(int cpu) +{ + return topology_physical_package_id(cpu); +} + int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index 44be3115375c..fd9e3ea6a480 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c @@ -19,69 +19,75 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/device.h> #include <linux/export.h> #include <linux/module.h> #include <linux/of.h> #include <linux/opp.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #include "arm_big_little.h" -static int dt_init_opp_table(struct device *cpu_dev) +/* get cpu node with valid operating-points */ +static struct device_node *get_cpu_node_with_valid_op(int cpu) { - struct device_node *np, *parent; - int count = 0, ret; + struct device_node *np = NULL, *parent; + int count = 0; parent = of_find_node_by_path("/cpus"); if (!parent) { pr_err("failed to find OF /cpus\n"); - return -ENOENT; + return NULL; } for_each_child_of_node(parent, np) { - if (count++ != cpu_dev->id) + if (count++ != cpu) continue; if (!of_get_property(np, "operating-points", NULL)) { - ret = -ENODATA; - } else { - cpu_dev->of_node = np; - ret = of_init_opp_table(cpu_dev); + of_node_put(np); + np = NULL; } - of_node_put(np); - of_node_put(parent); - return ret; + break; } - return -ENODEV; + of_node_put(parent); + return np; } -static int dt_get_transition_latency(struct device *cpu_dev) +static int dt_init_opp_table(struct device *cpu_dev) { - struct device_node *np, *parent; - u32 transition_latency = CPUFREQ_ETERNAL; - int count = 0; + struct device_node *np; + int ret; - parent = of_find_node_by_path("/cpus"); - if (!parent) { - pr_err("failed to find OF /cpus\n"); - return -ENOENT; - } + np = get_cpu_node_with_valid_op(cpu_dev->id); + if (!np) + return -ENODATA; - for_each_child_of_node(parent, np) { - if (count++ != cpu_dev->id) - continue; + cpu_dev->of_node = np; + ret = of_init_opp_table(cpu_dev); + of_node_put(np); - of_property_read_u32(np, "clock-latency", &transition_latency); - of_node_put(np); - of_node_put(parent); + return ret; +} - return 0; - } +static int dt_get_transition_latency(struct device *cpu_dev) +{ + struct device_node *np; + u32 transition_latency = CPUFREQ_ETERNAL; + + np = get_cpu_node_with_valid_op(cpu_dev->id); + if (!np) + return CPUFREQ_ETERNAL; - return -ENODEV; + of_property_read_u32(np, "clock-latency", &transition_latency); + of_node_put(np); + + pr_debug("%s: clock-latency: %d\n", __func__, transition_latency); + return transition_latency; } static struct cpufreq_arm_bL_ops dt_bL_ops = { @@ -90,17 +96,33 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = { .init_opp_table = dt_init_opp_table, }; -static int generic_bL_init(void) +static int generic_bL_probe(struct platform_device *pdev) { + struct device_node *np; + + np = get_cpu_node_with_valid_op(0); + if (!np) + return -ENODEV; + + of_node_put(np); return bL_cpufreq_register(&dt_bL_ops); } -module_init(generic_bL_init); -static void generic_bL_exit(void) +static int generic_bL_remove(struct platform_device *pdev) { - return bL_cpufreq_unregister(&dt_bL_ops); + bL_cpufreq_unregister(&dt_bL_ops); + return 0; } -module_exit(generic_bL_exit); + +static struct platform_driver generic_bL_platdrv = { + .driver = { + .name = "arm-bL-cpufreq-dt", + .owner 
= THIS_MODULE, + }, + .probe = generic_bL_probe, + .remove = generic_bL_remove, +}; +module_platform_driver(generic_bL_platdrv); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT"); diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 3ab8294eab04..ad1fde277661 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; struct opp *opp; unsigned long volt = 0, volt_old = 0, tol = 0; - long freq_Hz; + long freq_Hz, freq_exact; unsigned int index; int ret; @@ -60,6 +60,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); if (freq_Hz < 0) freq_Hz = freq_table[index].frequency * 1000; + freq_exact = freq_Hz; freqs.new = freq_Hz / 1000; freqs.old = clk_get_rate(cpu_clk) / 1000; @@ -98,7 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, } } - ret = clk_set_rate(cpu_clk, freqs.new * 1000); + ret = clk_set_rate(cpu_clk, freq_exact); if (ret) { pr_err("failed to set clock rate: %d\n", ret); if (cpu_reg) @@ -189,12 +190,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) if (!np) { pr_err("failed to find cpu0 node\n"); - return -ENOENT; + ret = -ENOENT; + goto out_put_parent; } cpu_dev = &pdev->dev; cpu_dev->of_node = np; + cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); + if (IS_ERR(cpu_reg)) { + /* + * If cpu0 regulator supply node is present, but regulator is + * not yet registered, we should try defering probe. + */ + if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { + dev_err(cpu_dev, "cpu0 regulator not ready, retry\n"); + ret = -EPROBE_DEFER; + goto out_put_node; + } + pr_warn("failed to get cpu0 regulator: %ld\n", + PTR_ERR(cpu_reg)); + cpu_reg = NULL; + } + cpu_clk = devm_clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); @@ -202,12 +220,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) goto out_put_node; } - cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); - if (IS_ERR(cpu_reg)) { - pr_warn("failed to get cpu0 regulator\n"); - cpu_reg = NULL; - } - ret = of_init_opp_table(cpu_dev); if (ret) { pr_err("failed to init OPP table: %d\n", ret); @@ -264,6 +276,8 @@ out_free_table: opp_free_cpufreq_table(cpu_dev, &freq_table); out_put_node: of_node_put(np); +out_put_parent: + of_node_put(parent); return ret; } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1b8a48eaf90f..2d53f47d1747 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif __func__, cpu_dev->id, cpu); } + if ((cpus == 1) && (cpufreq_driver->target)) + __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); + pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); cpufreq_cpu_put(data); /* If cpu is last user of policy, free policy */ if (cpus == 1) { - if (cpufreq_driver->target) - __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_read(cpu); kobj = &data->kobj; cmp = &data->kobj_unregister; @@ -1729,18 +1729,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, /* end old governor */ if (data->governor) { __cpufreq_governor(data, CPUFREQ_GOV_STOP); + unlock_policy_rwsem_write(policy->cpu); __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); + lock_policy_rwsem_write(policy->cpu); } /* start new governor */ 
data->governor = policy->governor; if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) { - if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) + if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) { failed = 0; - else + } else { + unlock_policy_rwsem_write(policy->cpu); __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); + lock_policy_rwsem_write(policy->cpu); + } } if (failed) { @@ -1832,15 +1837,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, if (dev) { switch (action) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: cpufreq_add_dev(dev, NULL); break; case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: + case CPU_UP_CANCELED_FROZEN: __cpufreq_remove_dev(dev, NULL); break; case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: cpufreq_add_dev(dev, NULL); break; } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 443442df113b..dc9b72e25c1a 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -26,6 +26,7 @@ #include <linux/tick.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <linux/cpu.h> #include "cpufreq_governor.h" @@ -180,8 +181,10 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, if (!all_cpus) { __gov_queue_work(smp_processor_id(), dbs_data, delay); } else { + get_online_cpus(); for_each_cpu(i, policy->cpus) __gov_queue_work(i, dbs_data, delay); + put_online_cpus(); } } EXPORT_SYMBOL_GPL(gov_queue_work); @@ -255,6 +258,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (have_governor_per_policy()) { WARN_ON(dbs_data); } else if (dbs_data) { + dbs_data->usage_count++; policy->governor_data = dbs_data; return 0; } @@ -266,6 +270,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, } dbs_data->cdata = cdata; + dbs_data->usage_count = 1; rc = cdata->init(dbs_data); if (rc) { pr_err("%s: POLICY_INIT: init() failed\n", __func__); @@ -294,7 +299,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, latency * LATENCY_MULTIPLIER)); - if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + if ((cdata->governor == GOV_CONSERVATIVE) && + (!policy->governor->initialized)) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_register_notifier(cs_ops->notifier_block, @@ -306,12 +312,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, return 0; case CPUFREQ_GOV_POLICY_EXIT: - if ((policy->governor->initialized == 1) || - have_governor_per_policy()) { + if (!--dbs_data->usage_count) { sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr(dbs_data)); - if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && + (policy->governor->initialized == 1)) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_unregister_notifier(cs_ops->notifier_block, diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 8ac33538d0bd..e16a96130cb3 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -211,6 +211,7 @@ struct common_dbs_data { struct dbs_data { struct common_dbs_data *cdata; unsigned int min_sampling_rate; + int usage_count; void *tuners; /* dbs_mutex protects dbs_enable in governor start/stop */ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index b0ffef96bf77..4b9bb5def6f1 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -547,7 
+547,6 @@ static int od_init(struct dbs_data *dbs_data) tuners->io_is_busy = should_io_be_busy(); dbs_data->tuners = tuners; - pr_info("%s: tuners %p\n", __func__, tuners); mutex_init(&dbs_data->mutex); return 0; } diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index bfd6273fd873..fb65decffa28 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: cpufreq_update_policy(cpu); break; case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: cpufreq_stats_free_sysfs(cpu); break; case CPU_DEAD: - case CPU_DEAD_FROZEN: + cpufreq_stats_free_table(cpu); + break; + case CPU_UP_CANCELED_FROZEN: + cpufreq_stats_free_sysfs(cpu); cpufreq_stats_free_table(cpu); break; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index cc3a8e6c92be..07f2840ad805 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y) } struct sample { - ktime_t start_time; - ktime_t end_time; int core_pct_busy; - int pstate_pct_busy; - u64 duration_us; - u64 idletime_us; u64 aperf; u64 mperf; int freq; @@ -86,13 +81,9 @@ struct cpudata { struct pstate_adjust_policy *pstate_policy; struct pstate_data pstate; struct _pid pid; - struct _pid idle_pid; int min_pstate_count; - int idle_mode; - ktime_t prev_sample; - u64 prev_idle_time_us; u64 prev_aperf; u64 prev_mperf; int sample_ptr; @@ -124,6 +115,8 @@ struct perf_limits { int min_perf_pct; int32_t max_perf; int32_t min_perf; + int max_policy_pct; + int max_sysfs_pct; }; static struct perf_limits limits = { @@ -132,6 +125,8 @@ static struct perf_limits limits = { .max_perf = int_tofp(1), .min_perf_pct = 0, .min_perf = 0, + .max_policy_pct = 100, + .max_sysfs_pct = 100, }; static inline void pid_reset(struct _pid *pid, int setpoint, int busy, @@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) 0); } -static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu) -{ - pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct); - pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct); - pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct); - - pid_reset(&cpu->idle_pid, - 75, - 50, - cpu->pstate_policy->deadband, - 0); -} - static inline void intel_pstate_reset_all_pid(void) { unsigned int cpu; @@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - limits.max_perf_pct = clamp_t(int, input, 0 , 100); + limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); + limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return count; } @@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) if (pstate == cpu->pstate.current_pstate) return; -#ifndef MODULE trace_cpu_frequency(pstate * 100000, cpu->cpu); -#endif + cpu->pstate.current_pstate = pstate; wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); @@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { u64 core_pct; - sample->pstate_pct_busy = 100 - div64_u64( - sample->idletime_us * 100, - sample->duration_us); core_pct = div64_u64(sample->aperf * 100, sample->mperf); sample->freq = cpu->pstate.max_pstate * core_pct * 
1000; - sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), - 100); + sample->core_pct_busy = core_pct; } static inline void intel_pstate_sample(struct cpudata *cpu) { - ktime_t now; - u64 idle_time_us; u64 aperf, mperf; - now = ktime_get(); - idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL); - rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - /* for the first sample, don't actually record a sample, just - * set the baseline */ - if (cpu->prev_idle_time_us > 0) { - cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; - cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; - cpu->samples[cpu->sample_ptr].end_time = now; - cpu->samples[cpu->sample_ptr].duration_us = - ktime_us_delta(now, cpu->prev_sample); - cpu->samples[cpu->sample_ptr].idletime_us = - idle_time_us - cpu->prev_idle_time_us; - - cpu->samples[cpu->sample_ptr].aperf = aperf; - cpu->samples[cpu->sample_ptr].mperf = mperf; - cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; - cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; - - intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); - } + cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; + cpu->samples[cpu->sample_ptr].aperf = aperf; + cpu->samples[cpu->sample_ptr].mperf = mperf; + cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; + cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; + + intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); - cpu->prev_sample = now; - cpu->prev_idle_time_us = idle_time_us; cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; } @@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) mod_timer_pinned(&cpu->timer, jiffies + delay); } -static inline void intel_pstate_idle_mode(struct cpudata *cpu) -{ - cpu->idle_mode = 1; -} - -static inline void intel_pstate_normal_mode(struct cpudata *cpu) -{ - cpu->idle_mode = 0; -} - static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) { int32_t busy_scaled; @@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) intel_pstate_pstate_decrease(cpu, steps); } -static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu) -{ - int busy_scaled; - struct _pid *pid; - int ctl = 0; - int steps; - - pid = &cpu->idle_pid; - - busy_scaled = intel_pstate_get_scaled_busy(cpu); - - ctl = pid_calc(pid, 100 - busy_scaled); - - steps = abs(ctl); - if (ctl < 0) - intel_pstate_pstate_decrease(cpu, steps); - else - intel_pstate_pstate_increase(cpu, steps); - - if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) - intel_pstate_normal_mode(cpu); -} - static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; intel_pstate_sample(cpu); + intel_pstate_adjust_busy_pstate(cpu); - if (!cpu->idle_mode) - intel_pstate_adjust_busy_pstate(cpu); - else - intel_pstate_adjust_idle_pstate(cpu); - -#if defined(XPERF_FIX) if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { cpu->min_pstate_count++; if (!(cpu->min_pstate_count % 5)) { intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); - intel_pstate_idle_mode(cpu); } } else cpu->min_pstate_count = 0; -#endif + intel_pstate_set_sample_time(cpu); } @@ -600,6 +521,7 @@ static void intel_pstate_timer_func(unsigned long __data) static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x2a, default_policy), ICPU(0x2d, default_policy), + ICPU(0x3a, default_policy), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); @@ -631,7 +553,6 @@ static int 
intel_pstate_init_cpu(unsigned int cpunum) (unsigned long)cpu; cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); - intel_pstate_idle_pid_reset(cpu); intel_pstate_sample(cpu); intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); @@ -675,8 +596,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); - limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; - limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); + limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq; + limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); + limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return 0; @@ -788,10 +710,9 @@ static int __init intel_pstate_init(void) pr_info("Intel P-state driver initializing.\n"); - all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); + all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); if (!all_cpu_data) return -ENOMEM; - memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus()); rc = cpufreq_register_driver(&intel_pstate_driver); if (rc) diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index d36ea8dc96eb..b2644af985ec 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Cannot get memory resource\n"); - return -ENODEV; - } priv.base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv.base)) return PTR_ERR(priv.base); diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 84889573b566..d53912768946 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -18,6 +18,7 @@ #include <linux/platform_device.h> #include <asm/clock.h> +#include <asm/idle.h> #include <asm/mach-loongson/loongson.h> @@ -200,6 +201,7 @@ static void loongson2_cpu_wait(void) LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ spin_unlock_irqrestore(&loongson2_wait_lock, flags); + local_irq_enable(); } static int __init cpufreq_init(void) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 765fdf5ce579..bf416a8391a7 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1154,7 +1154,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, - DMA_BIDIRECTIONAL, assoc_chained); + DMA_TO_DEVICE, assoc_chained); if (likely(req->src == req->dst)) { sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_BIDIRECTIONAL, src_chained); @@ -1336,7 +1336,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, - DMA_BIDIRECTIONAL, assoc_chained); + DMA_TO_DEVICE, assoc_chained); if (likely(req->src == req->dst)) { sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? 
: 1, DMA_BIDIRECTIONAL, src_chained); diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index a76d4c4f29f5..35d483f8db66 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = { .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_blkcipher_type, + .cra_alignmask = 0xf, .cra_module = THIS_MODULE, .cra_init = nx_crypto_ctx_aes_cbc_init, .cra_exit = nx_crypto_ctx_exit, diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index ba5f1611336f..7bbc9a81da21 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, + .cra_alignmask = 0xf, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index c8109edc5cfb..6cca6c392b00 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else - nbytes -= AES_BLOCK_SIZE; + nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 9767315f8c0b..67024f2f0b78 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover */ - if (len + sctx->count <= SHA256_BLOCK_SIZE) { + if (len + sctx->count < SHA256_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count, data, len); sctx->count += len; goto out; @@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha256_ops)); /* copy the leftover back into the state struct */ - memcpy(sctx->buf, data + len - leftover, leftover); + if (leftover) + memcpy(sctx->buf, data + len - leftover, leftover); sctx->count = leftover; csbcpb->cpb.sha256.message_bit_length += (u64) @@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct nx_sg *in_sg, *out_sg; int rc; + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ @@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) atomic_inc(&(nx_ctx->stats->sha256_ops)); - atomic64_add(csbcpb->cpb.sha256.message_bit_length, + atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8, &(nx_ctx->stats->sha256_bytes)); memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 3177b8c3d5f1..08eee1122349 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover */ - if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) { + if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count[0], data, 
len); sctx->count[0] += len; goto out; @@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha512_ops)); /* copy the leftover back into the state struct */ - memcpy(sctx->buf, data + len - leftover, leftover); + if (leftover) + memcpy(sctx->buf, data + len - leftover, leftover); sctx->count[0] = leftover; spbc_bits = csbcpb->cpb.sha512.spbc * 8; @@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); - atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo, + atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8, &(nx_ctx->stats->sha512_bytes)); memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index c767f232e693..bbdab6e5ccf0 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, { struct nx_sg *nx_insg = nx_ctx->in_sg; struct nx_sg *nx_outsg = nx_ctx->out_sg; - struct blkcipher_walk walk; - int rc; - - blkcipher_walk_init(&walk, dst, src, nbytes); - rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - if (rc) - goto out; if (iv) - memcpy(iv, walk.iv, AES_BLOCK_SIZE); + memcpy(iv, desc->info, AES_BLOCK_SIZE); - while (walk.nbytes) { - nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - - rc = blkcipher_walk_done(desc, &walk, 0); - if (rc) - break; - } - - if (walk.nbytes) { - nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - - rc = 0; - } + nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes); + nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes); /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear * buffers */ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg); -out: - return rc; + + return 0; } /** @@ -454,6 +430,8 @@ static int nx_register_algs(void) if (rc) goto out; + nx_driver.of.status = NX_OKAY; + rc = crypto_register_alg(&nx_ecb_aes_alg); if (rc) goto out; @@ -498,8 +476,6 @@ static int nx_register_algs(void) if (rc) goto out_unreg_s512; - nx_driver.of.status = NX_OKAY; - goto out; out_unreg_s512: diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index a97bb6c1596c..c3dc1c04a5df 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = { { .compatible = "fsl,imx27-sahara" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(platform, sahara_dt_ids); +MODULE_DEVICE_TABLE(of, sahara_dt_ids); static int sahara_probe(struct platform_device *pdev) { diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index ba6fc62e9651..5a18f82f732a 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -4,7 +4,8 @@ * Based on of-dma.c * * Copyright (C) 2013, Intel Corporation - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> * * This program is free software; you can redistribute it 
and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,6 +17,7 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/ioport.h> #include <linux/acpi.h> #include <linux/acpi_dma.h> @@ -23,6 +25,117 @@ static LIST_HEAD(acpi_dma_list); static DEFINE_MUTEX(acpi_dma_lock); /** + * acpi_dma_parse_resource_group - match device and parse resource group + * @grp: CSRT resource group + * @adev: ACPI device to match with + * @adma: struct acpi_dma of the given DMA controller + * + * Returns 1 on success, 0 when no information is available, or appropriate + * errno value on error. + * + * In order to match a device from DSDT table to the corresponding CSRT device + * we use MMIO address and IRQ. + */ +static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, + struct acpi_device *adev, struct acpi_dma *adma) +{ + const struct acpi_csrt_shared_info *si; + struct list_head resource_list; + struct resource_list_entry *rentry; + resource_size_t mem = 0, irq = 0; + u32 vendor_id; + int ret; + + if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info)) + return -ENODEV; + + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); + if (ret <= 0) + return 0; + + list_for_each_entry(rentry, &resource_list, node) { + if (resource_type(&rentry->res) == IORESOURCE_MEM) + mem = rentry->res.start; + else if (resource_type(&rentry->res) == IORESOURCE_IRQ) + irq = rentry->res.start; + } + + acpi_dev_free_resource_list(&resource_list); + + /* Consider initial zero values as resource not found */ + if (mem == 0 && irq == 0) + return 0; + + si = (const struct acpi_csrt_shared_info *)&grp[1]; + + /* Match device by MMIO and IRQ */ + if (si->mmio_base_low != mem || si->gsi_interrupt != irq) + return 0; + + vendor_id = le32_to_cpu(grp->vendor_id); + dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", + (char *)&vendor_id, grp->device_id, grp->revision); + + /* Check if the request line range is available */ + if (si->base_request_line == 0 && si->num_handshake_signals == 0) + return 0; + + adma->base_request_line = si->base_request_line; + adma->end_request_line = si->base_request_line + + si->num_handshake_signals - 1; + + dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n", + adma->base_request_line, adma->end_request_line); + + return 1; +} + +/** + * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources + * @adev: ACPI device to match with + * @adma: struct acpi_dma of the given DMA controller + * + * CSRT or Core System Resources Table is a proprietary ACPI table + * introduced by Microsoft. This table can contain devices that are not in + * the system DSDT table. In particular DMA controllers might be described + * here. + * + * We are using this table to get the request line range of the specific DMA + * controller to be used later. 
+ * + */ +static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) +{ + struct acpi_csrt_group *grp, *end; + struct acpi_table_csrt *csrt; + acpi_status status; + int ret; + + status = acpi_get_table(ACPI_SIG_CSRT, 0, + (struct acpi_table_header **)&csrt); + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) + dev_warn(&adev->dev, "failed to get the CSRT table\n"); + return; + } + + grp = (struct acpi_csrt_group *)(csrt + 1); + end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length); + + while (grp < end) { + ret = acpi_dma_parse_resource_group(grp, adev, adma); + if (ret < 0) { + dev_warn(&adev->dev, + "error in parsing resource group\n"); + return; + } + + grp = (struct acpi_csrt_group *)((void *)grp + grp->length); + } +} + +/** * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers * @dev: struct device of DMA controller * @acpi_dma_xlate: translation function which converts a dma specifier @@ -61,6 +174,8 @@ int acpi_dma_controller_register(struct device *dev, adma->acpi_dma_xlate = acpi_dma_xlate; adma->data = data; + acpi_dma_parse_csrt(adev, adma); + /* Now queue acpi_dma controller structure in list */ mutex_lock(&acpi_dma_lock); list_add_tail(&adma->dma_controllers, &acpi_dma_list); @@ -149,6 +264,45 @@ void devm_acpi_dma_controller_free(struct device *dev) } EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); +/** + * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function + * @adma: struct acpi_dma of DMA controller + * @dma_spec: dma specifier to update + * + * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. + * + * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource + * Descriptor": + * DMA Request Line bits is a platform-relative number uniquely + * identifying the request line assigned. Request line-to-Controller + * mapping is done in a controller-specific OS driver. + * That's why we can safely adjust slave_id when the appropriate controller is + * found. + */ +static int acpi_dma_update_dma_spec(struct acpi_dma *adma, + struct acpi_dma_spec *dma_spec) +{ + /* Set link to the DMA controller device */ + dma_spec->dev = adma->dev; + + /* Check if the request line range is available */ + if (adma->base_request_line == 0 && adma->end_request_line == 0) + return 0; + + /* Check if slave_id falls to the range */ + if (dma_spec->slave_id < adma->base_request_line || + dma_spec->slave_id > adma->end_request_line) + return -1; + + /* + * Here we adjust slave_id. It should be a relative number to the base + * request line. + */ + dma_spec->slave_id -= adma->base_request_line; + + return 1; +} + struct acpi_dma_parser_data { struct acpi_dma_spec dma_spec; size_t index; @@ -193,6 +347,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, struct acpi_device *adev; struct acpi_dma *adma; struct dma_chan *chan = NULL; + int found; /* Check if the device was enumerated by ACPI */ if (!dev || !ACPI_HANDLE(dev)) @@ -219,9 +374,20 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, mutex_lock(&acpi_dma_lock); list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { - dma_spec->dev = adma->dev; + /* + * We are not going to call translation function if slave_id + * doesn't fall to the request range. 
+ */ + found = acpi_dma_update_dma_spec(adma, dma_spec); + if (found < 0) + continue; chan = adma->acpi_dma_xlate(dma_spec, adma); - if (chan) + /* + * Try to get a channel only from the DMA controller that + * matches the slave_id. See acpi_dma_update_dma_spec() + * description for the details. + */ + if (found > 0 || chan) break; } diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d8ce4ecfef18..e88ded2c8d2f 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -716,8 +716,7 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, - done.done || kthread_should_stop(), + wait_event_freezable_timeout(done_wait, done.done, msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); @@ -997,7 +996,6 @@ static void stop_threaded_test(struct dmatest_info *info) static int __restart_threaded_test(struct dmatest_info *info, bool run) { struct dmatest_params *params = &info->params; - int ret; /* Stop any running test first */ __stop_threaded_test(info); @@ -1012,13 +1010,23 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run) memcpy(params, &info->dbgfs_params, sizeof(*params)); /* Run test with new parameters */ - ret = __run_threaded_test(info); - if (ret) { - __stop_threaded_test(info); - pr_err("dmatest: Can't run test\n"); + return __run_threaded_test(info); +} + +static bool __is_threaded_test_run(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) + return true; + } } - return ret; + return false; } static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, @@ -1091,22 +1099,10 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf, { struct dmatest_info *info = file->private_data; char buf[3]; - struct dmatest_chan *dtc; - bool alive = false; mutex_lock(&info->lock); - list_for_each_entry(dtc, &info->channels, node) { - struct dmatest_thread *thread; - - list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) { - alive = true; - break; - } - } - } - if (alive) { + if (__is_threaded_test_run(info)) { buf[0] = 'Y'; } else { __stop_threaded_test(info); @@ -1132,7 +1128,12 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, if (strtobool(buf, &bv) == 0) { mutex_lock(&info->lock); - ret = __restart_threaded_test(info, bv); + + if (__is_threaded_test_run(info)) + ret = -EBUSY; + else + ret = __restart_threaded_test(info, bv); + mutex_unlock(&info->lock); } diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 1734feec47b1..71bf4ec300ea 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1566,10 +1566,12 @@ static void dma_tc_handle(struct d40_chan *d40c) return; } - if (d40_queue_start(d40c) == NULL) + if (d40_queue_start(d40c) == NULL) { d40c->busy = false; - pm_runtime_mark_last_busy(d40c->base->dev); - pm_runtime_put_autosuspend(d40c->base->dev); + + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); + } d40_desc_remove(d40d); d40_desc_done(d40c, d40d); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index ce193409ebd3..33f59ecd256e 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1273,11 +1273,6 @@ static int tegra_dma_probe(struct platform_device *pdev) 
platform_set_drvdata(pdev, tdma); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "No mem resource for DMA\n"); - return -EINVAL; - } - tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index 8c171fa1cb9b..845f04786c2d 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c @@ -202,9 +202,9 @@ static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR, amd64_inject_word_show, amd64_inject_word_store); static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR, amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store); -static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR, +static DEVICE_ATTR(inject_write, S_IWUSR, NULL, amd64_inject_write_store); -static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR, +static DEVICE_ATTR(inject_read, S_IWUSR, NULL, amd64_inject_read_store); diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index b623c599e572..8bd1bb6dbe47 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -523,13 +523,11 @@ static void efivar_update_sysfs_entries(struct work_struct *work) struct efivar_entry *entry; int err; - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return; - /* Add new sysfs entries */ while (1) { - memset(entry, 0, sizeof(*entry)); + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; err = efivar_init(efivar_update_sysfs_entry, entry, true, false, &efivar_sysfs_list); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 87d567089f13..573c449c49b9 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -636,7 +636,7 @@ config GPIO_MAX7301 config GPIO_MCP23S08 tristate "Microchip MCP23xxx I/O expander" - depends on SPI_MASTER || I2C + depends on (SPI_MASTER && !I2C) || I2C help SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017 I/O expanders. 
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c index 634c3d37f7b5..62ef10a641c4 100644 --- a/drivers/gpio/gpio-langwell.c +++ b/drivers/gpio/gpio-langwell.c @@ -324,6 +324,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev, resource_size_t start, len; struct lnw_gpio *lnw; u32 gpio_base; + u32 irq_base; int retval; int ngpio = id->driver_data; @@ -345,6 +346,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev, retval = -EFAULT; goto err_ioremap; } + irq_base = *(u32 *)base; gpio_base = *((u32 *)base + 1); /* release the IO mapping, since we already get the info from bar1 */ iounmap(base); @@ -365,13 +367,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev, goto err_ioremap; } - lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, - &lnw_gpio_irq_ops, lnw); - if (!lnw->domain) { - retval = -ENOMEM; - goto err_ioremap; - } - lnw->reg_base = base; lnw->chip.label = dev_name(&pdev->dev); lnw->chip.request = lnw_gpio_request; @@ -384,6 +379,14 @@ static int lnw_gpio_probe(struct pci_dev *pdev, lnw->chip.ngpio = ngpio; lnw->chip.can_sleep = 0; lnw->pdev = pdev; + + lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base, + &lnw_gpio_irq_ops, lnw); + if (!lnw->domain) { + retval = -ENOMEM; + goto err_ioremap; + } + pci_set_drvdata(pdev, lnw); retval = gpiochip_add(&lnw->chip); if (retval) { diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index b73366523fae..0966f2637ad2 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c @@ -496,8 +496,7 @@ err_irq_alloc_descs: err_gpiochip_add: while (--i >= 0) { chip--; - ret = gpiochip_remove(&chip->gpio); - if (ret) + if (gpiochip_remove(&chip->gpio)) dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i); } kfree(chip_save); diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index bf69a7eff370..3a4816adc137 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -619,11 +619,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) * per-CPU registers */ if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "Cannot get memory resource\n"); - return -ENODEV; - } - mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mvchip->percpu_membase)) diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index 25000b0f8453..f8e6af20dfbf 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -326,7 +326,8 @@ static int mxs_gpio_probe(struct platform_device *pdev) err = bgpio_init(&port->bgc, &pdev->dev, 4, port->base + PINCTRL_DIN(port), - port->base + PINCTRL_DOUT(port), NULL, + port->base + PINCTRL_DOUT(port) + MXS_SET, + port->base + PINCTRL_DOUT(port) + MXS_CLR, port->base + PINCTRL_DOE(port), NULL, 0); if (err) goto out_irqdesc_free; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 2050891d9c65..d3f7d2db870f 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -69,6 +69,7 @@ struct gpio_bank { bool is_mpuio; bool dbck_flag; bool loses_context; + bool context_valid; int stride; u32 width; int context_loss_count; @@ -1128,6 +1129,10 @@ static int omap_gpio_probe(struct platform_device *pdev) bank->loses_context = true; } else { bank->loses_context = pdata->loses_context; + + if (bank->loses_context) + bank->get_context_loss_count = + pdata->get_context_loss_count; } @@ -1178,9 +1183,6 @@ static int omap_gpio_probe(struct platform_device *pdev) 
omap_gpio_chip_init(bank); omap_gpio_show_rev(bank); - if (bank->loses_context) - bank->get_context_loss_count = pdata->get_context_loss_count; - pm_runtime_put(bank->dev); list_add_tail(&bank->node, &omap_gpio_list); @@ -1259,6 +1261,8 @@ update_gpio_context_count: return 0; } +static void omap_gpio_init_context(struct gpio_bank *p); + static int omap_gpio_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -1268,6 +1272,20 @@ static int omap_gpio_runtime_resume(struct device *dev) int c; spin_lock_irqsave(&bank->lock, flags); + + /* + * On the first resume during the probe, the context has not + * been initialised and so initialise it now. Also initialise + * the context loss count. + */ + if (bank->loses_context && !bank->context_valid) { + omap_gpio_init_context(bank); + + if (bank->get_context_loss_count) + bank->context_loss_count = + bank->get_context_loss_count(bank->dev); + } + _gpio_dbck_enable(bank); /* @@ -1384,6 +1402,29 @@ void omap2_gpio_resume_after_idle(void) } #if defined(CONFIG_PM_RUNTIME) +static void omap_gpio_init_context(struct gpio_bank *p) +{ + struct omap_gpio_reg_offs *regs = p->regs; + void __iomem *base = p->base; + + p->context.ctrl = __raw_readl(base + regs->ctrl); + p->context.oe = __raw_readl(base + regs->direction); + p->context.wake_en = __raw_readl(base + regs->wkup_en); + p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0); + p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1); + p->context.risingdetect = __raw_readl(base + regs->risingdetect); + p->context.fallingdetect = __raw_readl(base + regs->fallingdetect); + p->context.irqenable1 = __raw_readl(base + regs->irqenable); + p->context.irqenable2 = __raw_readl(base + regs->irqenable2); + + if (regs->set_dataout && p->regs->clr_dataout) + p->context.dataout = __raw_readl(base + regs->set_dataout); + else + p->context.dataout = __raw_readl(base + regs->dataout); + + p->context_valid = true; +} + static void omap_gpio_restore_context(struct gpio_bank *bank) { __raw_writel(bank->context.wake_en, @@ -1421,6 +1462,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank) #else #define omap_gpio_runtime_suspend NULL #define omap_gpio_runtime_resume NULL +static void omap_gpio_init_context(struct gpio_bank *p) {} #endif static const struct dev_pm_ops gpio_pm_ops = { diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index cdf599687cf7..0fec097e838d 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -424,8 +424,7 @@ end: err_request_irq: irq_free_descs(irq_base, gpio_pins[chip->ioh]); - ret = gpiochip_remove(&chip->gpio); - if (ret) + if (gpiochip_remove(&chip->gpio)) dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_gpiochip_add: diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index 1e4de16ceb41..5af65719b95d 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -272,10 +272,8 @@ static int sch_gpio_probe(struct platform_device *pdev) return 0; err_sch_gpio_resume: - err = gpiochip_remove(&sch_gpio_core); - if (err) - dev_err(&pdev->dev, "%s failed, %d\n", - "gpiochip_remove()", err); + if (gpiochip_remove(&sch_gpio_core)) + dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_sch_gpio_core: release_region(res->start, resource_size(res)); diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index da4cb5b0cb87..9a62672f1bed 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -463,11 +463,6 @@ static int 
tegra_gpio_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Missing MEM resource\n"); - return -ENODEV; - } - regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c index 095ab14cea4d..5ac2919197fe 100644 --- a/drivers/gpio/gpio-viperboard.c +++ b/drivers/gpio/gpio-viperboard.c @@ -446,7 +446,8 @@ static int vprbrd_gpio_probe(struct platform_device *pdev) return ret; err_gpiob: - ret = gpiochip_remove(&vb_gpio->gpioa); + if (gpiochip_remove(&vb_gpio->gpioa)) + dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_gpioa: return ret; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 3a8f7e6db295..e7e92429d10f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) { struct drm_crtc *crtc; + /* Locking is currently fubar in the panic handler. */ + if (oops_in_progress) + return; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) WARN_ON(!mutex_is_locked(&crtc->mutex)); @@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status) else return "unknown"; } +EXPORT_SYMBOL(drm_get_connector_status_name); /** * drm_mode_object_get - allocate a new modeset identifier diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index e974f9309b72..ed1334e27c33 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->helper_private; int count = 0; int mode_flags = 0; + bool verbose_prune = true; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); @@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", connector->base.id, drm_get_connector_name(connector)); drm_mode_connector_update_edid_property(connector, NULL); + verbose_prune = false; goto prune; } @@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, } prune: - drm_mode_prune_invalid(dev, &connector->modes, true); + drm_mode_prune_invalid(dev, &connector->modes, verbose_prune); if (list_empty(&connector->modes)) return 0; @@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work) continue; connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", - connector->base.id, - drm_get_connector_name(connector), - old_status, connector->status); - if (old_status != connector->status) + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] " + "status updated from %s to %s\n", + connector->base.id, + drm_get_connector_name(connector), + old, new); + changed = true; + } } mutex_unlock(&dev->mode_config.mutex); @@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) old_status = connector->status; connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 
connector->base.id, drm_get_connector_name(connector), - old_status, connector->status); + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->status)); if (old_status != connector->status) changed = true; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8d4f29075af5..9cc247f55502 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ - [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} + [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { @@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp, { struct drm_file *file_priv = filp->private_data; struct drm_device *dev; - const struct drm_ioctl_desc *ioctl; + const struct drm_ioctl_desc *ioctl = NULL; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; @@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp, atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; - DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", - task_pid_nr(current), cmd, nr, - (long)old_encode_dev(file_priv->minor->device), - file_priv->authenticated); - if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) goto err_i1; @@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp, } else goto err_i1; + DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->device), + file_priv->authenticated, ioctl->name); + /* Do not trust userspace, use our own definition */ func = ioctl->func; /* is there a local override? */ @@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp, } err_i1: + if (!ioctl) + DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->device), + file_priv->authenticated, cmd, nr); + if (kdata != stack_kdata) kfree(kdata); atomic_dec(&dev->ioctl_count); diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index 48c52f7df4e6..0cfb60f54766 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev, struct i2c_adapter *adap, const struct i2c_board_info *info) { - char modalias[sizeof(I2C_MODULE_PREFIX) - + I2C_NAME_SIZE]; struct module *module = NULL; struct i2c_client *client; struct drm_i2c_encoder_driver *encoder_drv; int err = 0; - snprintf(modalias, sizeof(modalias), - "%s%s", I2C_MODULE_PREFIX, info->type); - request_module(modalias); + request_module("%s%s", I2C_MODULE_PREFIX, info->type); client = i2c_new_device(adap, info); if (!client) { diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index a6a8643a6a77..8bcce7866d36 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off); */ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) { - /* vblank is not initialized (IRQ not installed ?) 
*/ + /* vblank is not initialized (IRQ not installed ?), or has been freed */ if (!dev->num_crtcs) return; /* @@ -1076,6 +1076,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc) { unsigned long irqflags; + /* vblank is not initialized (IRQ not installed ?), or has been freed */ + if (!dev->num_crtcs) + return; + if (dev->vblank_inmodeset[crtc]) { spin_lock_irqsave(&dev->vbl_lock, irqflags); dev->vblank_disable_allowed = 1; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index db1e2d6f90d7..07cf99cc8862 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) EXPORT_SYMBOL(drm_mm_debug_table); #if defined(CONFIG_DEBUG_FS) -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) { - struct drm_mm_node *entry; - unsigned long total_used = 0, total_free = 0, total = 0; unsigned long hole_start, hole_end, hole_size; - hole_start = drm_mm_hole_node_start(&mm->head_node); - hole_end = drm_mm_hole_node_end(&mm->head_node); - hole_size = hole_end - hole_start; - if (hole_size) + if (entry->hole_follows) { + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); + hole_size = hole_end - hole_start; seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", hole_start, hole_end, hole_size); - total_free += hole_size; + return hole_size; + } + + return 0; +} + +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +{ + struct drm_mm_node *entry; + unsigned long total_used = 0, total_free = 0, total = 0; + + total_free += drm_mm_dump_hole(m, &mm->head_node); drm_mm_for_each_node(entry, mm) { seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", entry->start, entry->start + entry->size, entry->size); total_used += entry->size; - if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(entry); - hole_end = drm_mm_hole_node_end(entry); - hole_size = hole_end - hole_start; - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", - hole_start, hole_end, hole_size); - total_free += hole_size; - } + total_free += drm_mm_dump_hole(m, entry); } total = total_free + total_used; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index faa79df02648..a371ff865a88 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, was_digit = false; } else goto done; + break; case '0' ... '9': was_digit = true; break; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index e8894bc9e6d5..c200e4d71e3d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -48,6 +48,8 @@ struct exynos_drm_crtc { unsigned int pipe; unsigned int dpms; enum exynos_crtc_mode mode; + wait_queue_head_t pending_flip_queue; + atomic_t pending_flip; }; static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -61,6 +63,13 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) return; } + if (mode > DRM_MODE_DPMS_ON) { + /* wait for the completion of page flip. 
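/*
 * Sketch of the page-flip synchronisation added for exynos above: the DPMS
 * path sleeps until an atomic "flip pending" flag is cleared by the
 * flip-done handler.  The foo_* names are placeholders, not exynos symbols.
 */
#include <linux/atomic.h>
#include <linux/wait.h>

struct foo_crtc {
        wait_queue_head_t pending_flip_queue;
        atomic_t pending_flip;
};

static void foo_crtc_init(struct foo_crtc *c)
{
        init_waitqueue_head(&c->pending_flip_queue);
        atomic_set(&c->pending_flip, 0);
}

static void foo_queue_flip(struct foo_crtc *c)
{
        atomic_set(&c->pending_flip, 1);        /* a flip is now outstanding */
}

/* DPMS-off path: block until the outstanding flip has completed. */
static void foo_wait_for_flip(struct foo_crtc *c)
{
        wait_event(c->pending_flip_queue,
                   atomic_read(&c->pending_flip) == 0);
}

/* Flip-done (vblank) handler side. */
static void foo_finish_flip(struct foo_crtc *c)
{
        atomic_set(&c->pending_flip, 0);
        wake_up(&c->pending_flip_queue);
}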
*/ + wait_event(exynos_crtc->pending_flip_queue, + atomic_read(&exynos_crtc->pending_flip) == 0); + drm_vblank_off(crtc->dev, exynos_crtc->pipe); + } + exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms); exynos_crtc->dpms = mode; } @@ -217,7 +226,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, ret = drm_vblank_get(dev, exynos_crtc->pipe); if (ret) { DRM_DEBUG("failed to acquire vblank counter\n"); - list_del(&event->base.link); goto out; } @@ -225,6 +233,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, spin_lock_irq(&dev->event_lock); list_add_tail(&event->base.link, &dev_priv->pageflip_event_list); + atomic_set(&exynos_crtc->pending_flip, 1); spin_unlock_irq(&dev->event_lock); crtc->fb = fb; @@ -344,6 +353,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) exynos_crtc->pipe = nr; exynos_crtc->dpms = DRM_MODE_DPMS_OFF; + init_waitqueue_head(&exynos_crtc->pending_flip_queue); + atomic_set(&exynos_crtc->pending_flip, 0); exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true); if (!exynos_crtc->plane) { kfree(exynos_crtc); @@ -398,7 +409,8 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc) { struct exynos_drm_private *dev_priv = dev->dev_private; struct drm_pending_vblank_event *e, *t; - struct timeval now; + struct drm_crtc *drm_crtc = dev_priv->crtc[crtc]; + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc); unsigned long flags; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -411,14 +423,11 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc) if (crtc != e->pipe) continue; - do_gettimeofday(&now); - e->event.sequence = 0; - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; - - list_move_tail(&e->base.link, &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); + list_del(&e->base.link); + drm_send_vblank_event(dev, -1, e); drm_vblank_put(dev, crtc); + atomic_set(&exynos_crtc->pending_flip, 0); + wake_up(&exynos_crtc->pending_flip_queue); } spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 68f0045f86b8..8f007aaeffc3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -182,7 +182,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem_obj->base); - if (IS_ERR_OR_NULL(helper->fb)) { + if (IS_ERR(helper->fb)) { DRM_ERROR("failed to create drm framebuffer.\n"); ret = PTR_ERR(helper->fb); goto err_destroy_gem; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 773f583fa964..4a1616a18ab7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -12,9 +12,9 @@ * */ #include <linux/kernel.h> -#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/clk.h> #include <linux/pm_runtime.h> @@ -1845,7 +1845,7 @@ static int fimc_probe(struct platform_device *pdev) } ctx->irq = res->start; - ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler, + ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler, IRQF_ONESHOT, "drm_fimc", ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); @@ -1854,7 +1854,7 @@ static int fimc_probe(struct platform_device *pdev) ret = 
fimc_setup_clocks(ctx); if (ret < 0) - goto err_free_irq; + return ret; ippdrv = &ctx->ippdrv; ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; @@ -1884,7 +1884,7 @@ static int fimc_probe(struct platform_device *pdev) goto err_pm_dis; } - dev_info(&pdev->dev, "drm fimc registered successfully.\n"); + dev_info(dev, "drm fimc registered successfully.\n"); return 0; @@ -1892,8 +1892,6 @@ err_pm_dis: pm_runtime_disable(dev); err_put_clk: fimc_put_clocks(ctx); -err_free_irq: - free_irq(ctx->irq, ctx); return ret; } @@ -1911,8 +1909,6 @@ static int fimc_remove(struct platform_device *pdev) pm_runtime_set_suspended(dev); pm_runtime_disable(dev); - free_irq(ctx->irq, ctx); - return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 746b282b343a..97c61dbffd82 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -885,7 +885,7 @@ static int fimd_probe(struct platform_device *pdev) DRM_DEBUG_KMS("%s\n", __FILE__); - if (pdev->dev.of_node) { + if (dev->of_node) { pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { DRM_ERROR("memory allocation for pdata failed\n"); @@ -899,7 +899,7 @@ static int fimd_probe(struct platform_device *pdev) return ret; } } else { - pdata = pdev->dev.platform_data; + pdata = dev->platform_data; if (!pdata) { DRM_ERROR("no platform data specified\n"); return -EINVAL; @@ -912,7 +912,7 @@ static int fimd_probe(struct platform_device *pdev) return -EINVAL; } - ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -930,7 +930,7 @@ static int fimd_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->regs = devm_ioremap_resource(&pdev->dev, res); + ctx->regs = devm_ioremap_resource(dev, res); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); @@ -942,7 +942,7 @@ static int fimd_probe(struct platform_device *pdev) ctx->irq = res->start; - ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler, + ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 47a493c8a71f..af75434ee4d7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1379,7 +1379,7 @@ static int g2d_probe(struct platform_device *pdev) struct exynos_drm_subdrv *subdrv; int ret; - g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL); + g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); if (!g2d) { dev_err(dev, "failed to allocate driver data\n"); return -ENOMEM; @@ -1417,7 +1417,7 @@ static int g2d_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - g2d->regs = devm_ioremap_resource(&pdev->dev, res); + g2d->regs = devm_ioremap_resource(dev, res); if (IS_ERR(g2d->regs)) { ret = PTR_ERR(g2d->regs); goto err_put_clk; @@ -1430,7 +1430,7 @@ static int g2d_probe(struct platform_device *pdev) goto err_put_clk; } - ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0, + ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d); if (ret < 0) { dev_err(dev, "irq request failed\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 7841c3b8a20e..762f40d548b7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ 
b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1704,7 +1704,7 @@ static int gsc_probe(struct platform_device *pdev) } ctx->irq = res->start; - ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler, + ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler, IRQF_ONESHOT, "drm_gsc", ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); @@ -1725,7 +1725,7 @@ static int gsc_probe(struct platform_device *pdev) ret = gsc_init_prop_list(ippdrv); if (ret < 0) { dev_err(dev, "failed to init property list.\n"); - goto err_get_irq; + return ret; } DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, @@ -1743,15 +1743,12 @@ static int gsc_probe(struct platform_device *pdev) goto err_ippdrv_register; } - dev_info(&pdev->dev, "drm gsc registered successfully.\n"); + dev_info(dev, "drm gsc registered successfully.\n"); return 0; err_ippdrv_register: - devm_kfree(dev, ippdrv->prop_list); pm_runtime_disable(dev); -err_get_irq: - free_irq(ctx->irq, ctx); return ret; } @@ -1761,15 +1758,12 @@ static int gsc_remove(struct platform_device *pdev) struct gsc_context *ctx = get_gsc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - devm_kfree(dev, ippdrv->prop_list); exynos_drm_ippdrv_unregister(ippdrv); mutex_destroy(&ctx->lock); pm_runtime_set_suspended(dev); pm_runtime_disable(dev); - free_irq(ctx->irq, ctx); - return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index ba2f0f1aa05f..437fb947e46d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -442,7 +442,7 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev) DRM_DEBUG_KMS("%s\n", __FILE__); - ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) { DRM_LOG_KMS("failed to alloc common hdmi context.\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 29d2ad314490..be1e88463466 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -222,7 +222,7 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, /* find ipp driver using idr */ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id); - if (IS_ERR_OR_NULL(ippdrv)) { + if (IS_ERR(ippdrv)) { DRM_ERROR("not found ipp%d driver.\n", ipp_id); return ippdrv; } @@ -388,7 +388,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property) DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id); ippdrv = ipp_find_drv_by_handle(prop_id); - if (IS_ERR_OR_NULL(ippdrv)) { + if (IS_ERR(ippdrv)) { DRM_ERROR("failed to get ipp driver.\n"); return -EINVAL; } @@ -492,7 +492,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, /* find ipp driver using ipp id */ ippdrv = ipp_find_driver(ctx, property); - if (IS_ERR_OR_NULL(ippdrv)) { + if (IS_ERR(ippdrv)) { DRM_ERROR("failed to get ipp driver.\n"); return -EINVAL; } @@ -521,19 +521,19 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, c_node->state = IPP_STATE_IDLE; c_node->start_work = ipp_create_cmd_work(); - if (IS_ERR_OR_NULL(c_node->start_work)) { + if (IS_ERR(c_node->start_work)) { DRM_ERROR("failed to create start work.\n"); goto err_clear; } c_node->stop_work = ipp_create_cmd_work(); - if (IS_ERR_OR_NULL(c_node->stop_work)) { + if (IS_ERR(c_node->stop_work)) { DRM_ERROR("failed to create stop work.\n"); goto err_free_start; } 
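/*
 * Minimal probe sketch of the devm_* conversions done in the exynos
 * fimc/gsc/rotator/hdmi hunks above: resources obtained through devm_* are
 * released automatically on probe failure and on driver detach, so the
 * explicit free_irq() calls and error-unwind labels can be dropped, and
 * devm_ioremap_resource() already validates the resource pointer itself.
 * All foo_* names below are placeholders.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq_thread(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        void __iomem *regs;
        int irq, ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(dev, res);   /* checks res for NULL */
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_threaded_irq(dev, irq, NULL, foo_irq_thread,
                                        IRQF_ONESHOT, "foo", pdev);
        if (ret < 0)
                return ret;

        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        /* nothing to unwind: devm releases the IRQ and the mapping */
        return 0;
}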
c_node->event_work = ipp_create_event_work(); - if (IS_ERR_OR_NULL(c_node->event_work)) { + if (IS_ERR(c_node->event_work)) { DRM_ERROR("failed to create event work.\n"); goto err_free_stop; } @@ -915,7 +915,7 @@ static int ipp_queue_buf_with_run(struct device *dev, DRM_DEBUG_KMS("%s\n", __func__); ippdrv = ipp_find_drv_by_handle(qbuf->prop_id); - if (IS_ERR_OR_NULL(ippdrv)) { + if (IS_ERR(ippdrv)) { DRM_ERROR("failed to get ipp driver.\n"); return -EFAULT; } @@ -1909,7 +1909,7 @@ static int ipp_probe(struct platform_device *pdev) struct exynos_drm_subdrv *subdrv; int ret; - ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -1963,7 +1963,7 @@ static int ipp_probe(struct platform_device *pdev) goto err_cmd_workq; } - dev_info(&pdev->dev, "drm ipp registered successfully.\n"); + dev_info(dev, "drm ipp registered successfully.\n"); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 947f09f15ad1..9b6c70964d71 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -666,8 +666,8 @@ static int rotator_probe(struct platform_device *pdev) return rot->irq; } - ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler, - IRQF_ONESHOT, "drm_rotator", rot); + ret = devm_request_threaded_irq(dev, rot->irq, NULL, + rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot); if (ret < 0) { dev_err(dev, "failed to request irq\n"); return ret; @@ -676,8 +676,7 @@ static int rotator_probe(struct platform_device *pdev) rot->clock = devm_clk_get(dev, "rotator"); if (IS_ERR(rot->clock)) { dev_err(dev, "failed to get clock\n"); - ret = PTR_ERR(rot->clock); - goto err_clk_get; + return PTR_ERR(rot->clock); } pm_runtime_enable(dev); @@ -709,10 +708,7 @@ static int rotator_probe(struct platform_device *pdev) return 0; err_ippdrv_register: - devm_kfree(dev, ippdrv->prop_list); pm_runtime_disable(dev); -err_clk_get: - free_irq(rot->irq, rot); return ret; } @@ -722,13 +718,10 @@ static int rotator_remove(struct platform_device *pdev) struct rot_context *rot = dev_get_drvdata(dev); struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; - devm_kfree(dev, ippdrv->prop_list); exynos_drm_ippdrv_unregister(ippdrv); pm_runtime_disable(dev); - free_irq(rot->irq, rot); - return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 9504b0cd825a..24376c194a5e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -594,7 +594,7 @@ static int vidi_probe(struct platform_device *pdev) DRM_DEBUG_KMS("%s\n", __FILE__); - ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -612,7 +612,7 @@ static int vidi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); - ret = device_create_file(&pdev->dev, &dev_attr_connection); + ret = device_create_file(dev, &dev_attr_connection); if (ret < 0) DRM_INFO("failed to create connection sysfs.\n"); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index bbfc3840080c..fd1426dca882 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1946,14 +1946,14 @@ static int hdmi_probe(struct platform_device *pdev) DRM_DEBUG_KMS("[%d]\n", __LINE__); - if (pdev->dev.of_node) { + if (dev->of_node) { pdata = 
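/*
 * The ipp_* conversions above replace IS_ERR_OR_NULL() with IS_ERR(): the
 * lookup/create helpers involved return either a valid pointer or an
 * ERR_PTR()-encoded errno, never NULL, so the extra NULL test only hides the
 * error code.  A hypothetical helper and caller showing the convention:
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct foo_work {
        int id;
};

static struct foo_work *foo_create_work(void)
{
        struct foo_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return ERR_PTR(-ENOMEM);  /* error pointer, never NULL */
        return w;
}

static int foo_setup(void)
{
        struct foo_work *w = foo_create_work();

        if (IS_ERR(w))
                return PTR_ERR(w);        /* propagate the encoded errno */
        kfree(w);
        return 0;
}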
drm_hdmi_dt_parse_pdata(dev); if (IS_ERR(pdata)) { DRM_ERROR("failed to parse dt\n"); return PTR_ERR(pdata); } } else { - pdata = pdev->dev.platform_data; + pdata = dev->platform_data; } if (!pdata) { @@ -1961,14 +1961,14 @@ static int hdmi_probe(struct platform_device *pdev) return -EINVAL; } - drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx), + drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } - hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context), + hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); if (!hdata) { DRM_ERROR("out of memory\n"); @@ -1985,7 +1985,7 @@ static int hdmi_probe(struct platform_device *pdev) if (dev->of_node) { const struct of_device_id *match; match = of_match_node(of_match_ptr(hdmi_match_types), - pdev->dev.of_node); + dev->of_node); if (match == NULL) return -ENODEV; hdata->type = (enum hdmi_type)match->data; @@ -2005,16 +2005,11 @@ static int hdmi_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("failed to find registers\n"); - return -ENOENT; - } - - hdata->regs = devm_ioremap_resource(&pdev->dev, res); + hdata->regs = devm_ioremap_resource(dev, res); if (IS_ERR(hdata->regs)) return PTR_ERR(hdata->regs); - ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD"); + ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD"); if (ret) { DRM_ERROR("failed to request HPD gpio\n"); return ret; @@ -2046,7 +2041,7 @@ static int hdmi_probe(struct platform_device *pdev) hdata->hpd = gpio_get_value(hdata->hpd_gpio); - ret = request_threaded_irq(hdata->irq, NULL, + ret = devm_request_threaded_irq(dev, hdata->irq, NULL, hdmi_irq_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "hdmi", drm_hdmi_ctx); @@ -2075,16 +2070,11 @@ err_ddc: static int hdmi_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev); - struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); pm_runtime_disable(dev); - free_irq(hdata->irq, hdata); - - /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); /* DDC i2c driver */ diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index ec3e376b7e01..7c197d3820c5 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1061,7 +1061,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx, return -ENXIO; } - mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, + mixer_res->mixer_regs = devm_ioremap(dev, res->start, resource_size(res)); if (mixer_res->mixer_regs == NULL) { dev_err(dev, "register mapping failed.\n"); @@ -1074,7 +1074,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx, return -ENXIO; } - ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, + ret = devm_request_irq(dev, res->start, mixer_irq_handler, 0, "drm_mixer", ctx); if (ret) { dev_err(dev, "request interrupt failed.\n"); @@ -1118,7 +1118,7 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx, return -ENXIO; } - mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, + mixer_res->vp_regs = devm_ioremap(dev, res->start, resource_size(res)); if (mixer_res->vp_regs == NULL) { dev_err(dev, "register mapping failed.\n"); @@ -1169,14 +1169,14 @@ static int mixer_probe(struct platform_device 
*pdev) dev_info(dev, "probe start\n"); - drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx), + drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } - ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) { DRM_ERROR("failed to alloc mixer context.\n"); return -ENOMEM; @@ -1187,14 +1187,14 @@ static int mixer_probe(struct platform_device *pdev) if (dev->of_node) { const struct of_device_id *match; match = of_match_node(of_match_ptr(mixer_match_types), - pdev->dev.of_node); + dev->of_node); drv = (struct mixer_drv_data *)match->data; } else { drv = (struct mixer_drv_data *) platform_get_device_id(pdev)->driver_data; } - ctx->dev = &pdev->dev; + ctx->dev = dev; ctx->parent_ctx = (void *)drm_hdmi_ctx; drm_hdmi_ctx->ctx = (void *)ctx; ctx->vp_enabled = drv->is_vp_enabled; diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 3cfd0931fbfb..82430ad8ba62 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, size_t addr = 0; struct gtt_range *gt; struct drm_gem_object *obj; - int ret; + int ret = 0; /* if we want to turn of the cursor ignore width and height */ if (!handle) { @@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, if (obj->size < width * height * 4) { dev_dbg(dev->dev, "buffer is to small\n"); - return -ENOMEM; + ret = -ENOMEM; + goto unref_cursor; } gt = container_of(obj, struct gtt_range, gem); @@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, ret = psb_gtt_pin(gt); if (ret) { dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); - return ret; + goto unref_cursor; } addr = gt->offset; /* Or resource.start ??? 
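/*
 * Shape of the reference-handling fix in the gma500 cursor_set paths above:
 * every failure after the GEM object lookup must drop the lookup reference,
 * so the early returns become "goto unref_cursor".  The foo_* types and
 * helpers are placeholders, not gma500 APIs.
 */
#include <linux/errno.h>

struct foo_obj {
        unsigned long size;
        int refcount;
};

static void foo_obj_unref(struct foo_obj *obj)
{
        obj->refcount--;
}

static int foo_pin(struct foo_obj *obj)
{
        return 0;                        /* pretend pinning always succeeds */
}

static int foo_cursor_set(struct foo_obj *obj, int width, int height)
{
        int ret = 0;

        if (obj->size < (unsigned long)width * height * 4) {
                ret = -ENOMEM;           /* buffer too small for the cursor */
                goto unref_cursor;
        }

        ret = foo_pin(obj);
        if (ret)
                goto unref_cursor;

        return 0;

unref_cursor:
        foo_obj_unref(obj);              /* drop the lookup reference */
        return ret;
}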
*/ @@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - psb_intel_crtc->cursor_obj = obj; } - return 0; + + psb_intel_crtc->cursor_obj = obj; + return ret; + +unref_cursor: + drm_gem_object_unreference(obj); + return ret; } static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) @@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc) kfree(psb_intel_crtc); } +static void cdv_intel_crtc_disable(struct drm_crtc *crtc) +{ + struct gtt_range *gt; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + if (crtc->fb) { + gt = to_psb_fb(crtc->fb)->gtt; + psb_gtt_unpin(gt); + } +} + const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { .dpms = cdv_intel_crtc_dpms, .mode_fixup = cdv_intel_crtc_mode_fixup, @@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { .mode_set_base = cdv_intel_pipe_set_base, .prepare = cdv_intel_crtc_prepare, .commit = cdv_intel_crtc_commit, + .disable = cdv_intel_crtc_disable, }; const struct drm_crtc_funcs cdv_intel_crtc_funcs = { diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 1534e220097a..8b1b6d923abe 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) unsigned long address; int ret; unsigned long pfn; - /* FIXME: assumes fb at stolen base which may not be true */ - unsigned long phys_addr = (unsigned long)dev_priv->stolen_base; + unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + + psbfb->gtt->offset; page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 6e8f42b61ff6..6666493789d1 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; struct drm_gem_object *obj; void *tmp_dst, *tmp_src; - int ret, i, cursor_pages; + int ret = 0, i, cursor_pages; /* if we want to turn of the cursor ignore width and height */ if (!handle) { @@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, if (obj->size < width * height * 4) { dev_dbg(dev->dev, "buffer is to small\n"); - return -ENOMEM; + ret = -ENOMEM; + goto unref_cursor; } gt = container_of(obj, struct gtt_range, gem); @@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, ret = psb_gtt_pin(gt); if (ret) { dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); - return ret; + goto unref_cursor; } if (dev_priv->ops->cursor_needs_phys) { if (cursor_gt == NULL) { dev_err(dev->dev, "No hardware cursor mem available"); - return -ENOMEM; + ret = -ENOMEM; + goto unref_cursor; } /* Prevent overflow */ @@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); - psb_intel_crtc->cursor_obj = obj; } - return 0; + + psb_intel_crtc->cursor_obj = obj; + return ret; + +unref_cursor: + drm_gem_object_unreference(obj); + return ret; } static 
int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) @@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc) kfree(psb_intel_crtc); } +static void psb_intel_crtc_disable(struct drm_crtc *crtc) +{ + struct gtt_range *gt; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + if (crtc->fb) { + gt = to_psb_fb(crtc->fb)->gtt; + psb_gtt_unpin(gt); + } +} + const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .dpms = psb_intel_crtc_dpms, .mode_fixup = psb_intel_crtc_mode_fixup, @@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .mode_set_base = psb_intel_pipe_set_base, .prepare = psb_intel_crtc_prepare, .commit = psb_intel_crtc_commit, + .disable = psb_intel_crtc_disable, }; const struct drm_crtc_funcs psb_intel_crtc_funcs = { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9ebe895c17d6..a2e4953b8e8d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -364,40 +364,64 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ - INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ - INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */ + INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */ + INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */ + INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */ + INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */ + INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */ INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ - INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */ INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ - INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */ INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ - INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */ + INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */ + INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */ + INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */ + INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */ + 
INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */ + INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */ INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ - INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */ INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ - INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ - INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */ + INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */ + INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */ + INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */ + INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */ + INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */ + INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */ INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ - INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */ INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ - INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */ INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ - INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */ + INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */ + INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */ + INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */ + INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */ + INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */ + INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d5dcf7fe1ee9..b9d00dcf9a2d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1943,4 +1943,19 @@ static inline void __user *to_user_ptr(u64 address) return (void __user *)(uintptr_t)address; } +static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) +{ + unsigned long j = msecs_to_jiffies(m); + + return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); +} + +static inline unsigned long +timespec_to_jiffies_timeout(const struct timespec *value) +{ + unsigned long j = timespec_to_jiffies(value); + + return 
min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6be940effefd..970ad17c99ab 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -91,14 +91,11 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) { int ret; -#define EXIT_COND (!i915_reset_in_progress(error)) +#define EXIT_COND (!i915_reset_in_progress(error) || \ + i915_terminally_wedged(error)) if (EXIT_COND) return 0; - /* GPU is already declared terminally dead, give up. */ - if (i915_terminally_wedged(error)) - return -EIO; - /* * Only wait 10 seconds for the gpu reset to complete to avoid hanging * userspace. If it takes that long something really bad is going on and @@ -1003,7 +1000,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, wait_forever = false; } - timeout_jiffies = timespec_to_jiffies(&wait_time); + timeout_jiffies = timespec_to_jiffies_timeout(&wait_time); if (WARN_ON(!ring->irq_get(ring))) return -ENODEV; @@ -1045,6 +1042,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, if (timeout) { struct timespec sleep_time = timespec_sub(now, before); *timeout = timespec_sub(*timeout, sleep_time); + if (!timespec_valid(timeout)) /* i.e. negative time remains */ + set_normalized_timespec(timeout, 0, 0); } switch (end) { @@ -1053,8 +1052,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, case -ERESTARTSYS: /* Signal */ return (int)end; case 0: /* Timeout */ - if (timeout) - set_normalized_timespec(timeout, 0, 0); return -ETIME; default: /* Completed */ WARN_ON(end < 0); /* We're not aware of other errors */ @@ -2377,10 +2374,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) mutex_unlock(&dev->struct_mutex); ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); - if (timeout) { - WARN_ON(!timespec_valid(timeout)); + if (timeout) args->timeout_ns = timespec_to_ns(timeout); - } return ret; out: diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index dca614de71b6..bdb0d7717bc7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) return snb_gmch_ctl << 25; /* 32 MB units */ } -static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl) -{ - static const int stolen_decoder[] = { - 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; - snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; - snb_gmch_ctl &= IVB_GMCH_GMS_MASK; - return stolen_decoder[snb_gmch_ctl] << 20; -} - static int gen6_gmch_probe(struct drm_device *dev, size_t *gtt_total, size_t *stolen, @@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev, pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) - *stolen = gen7_get_stolen_size(snb_gmch_ctl); - else - *stolen = gen6_get_stolen_size(snb_gmch_ctl); - + *stolen = gen6_get_stolen_size(snb_gmch_ctl); *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; /* For Modern GENs the PTEs and register space are split in the BAR */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 83f9c26e1adb..2d6b62e42daf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -46,8 +46,6 @@ #define SNB_GMCH_GGMS_MASK 0x3 #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ #define 
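/*
 * Rationale for the "+ 1" in the msecs_to_jiffies_timeout() helper added
 * above: the current tick is already partially elapsed when a wait starts,
 * so a plain msecs_to_jiffies() conversion can expire up to one jiffy early.
 * Adding a jiffy (clamped to MAX_JIFFY_OFFSET) guarantees at least the
 * requested wall-clock time.  Hypothetical caller using the new helper:
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>

static int foo_wait_ready(wait_queue_head_t *wq, bool *ready)
{
        /* sleeps for at least 10 ms of real time before giving up */
        if (!wait_event_timeout(*wq, *ready, msecs_to_jiffies_timeout(10)))
                return -ETIMEDOUT;
        return 0;
}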
SNB_GMCH_GMS_MASK 0x1f -#define IVB_GMCH_GMS_SHIFT 4 -#define IVB_GMCH_GMS_MASK 0xf /* PCI config space */ diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 26a0a570f92e..fb961bb81903 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); + if (port != PORT_A) + intel_dp_stop_link_train(intel_dp); } } @@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) } else if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + if (port == PORT_A) + intel_dp_stop_link_train(intel_dp); + ironlake_edp_backlight_on(intel_dp); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index efe829919755..56746dcac40f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7937,6 +7937,11 @@ intel_modeset_check_state(struct drm_device *dev) memset(&pipe_config, 0, sizeof(pipe_config)); active = dev_priv->display.get_pipe_config(crtc, &pipe_config); + + /* hw state is inconsistent with the pipe A quirk */ + if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) + active = crtc->active; + WARN(crtc->active != active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", crtc->active, active); @@ -8140,6 +8145,21 @@ static void intel_set_config_restore_state(struct drm_device *dev, } } +static bool +is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors, + int num_connectors) +{ + int i; + + for (i = 0; i < num_connectors; i++) + if (connectors[i].encoder && + connectors[i].encoder->crtc == crtc && + connectors[i].dpms != DRM_MODE_DPMS_ON) + return true; + + return false; +} + static void intel_set_config_compute_mode_changes(struct drm_mode_set *set, struct intel_set_config *config) @@ -8147,7 +8167,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, /* We should be able to check here if the fb has the same properties * and then just flip_or_move it */ - if (set->crtc->fb != set->fb) { + if (set->connectors != NULL && + is_crtc_connector_off(set->crtc, *set->connectors, + set->num_connectors)) { + config->mode_changed = true; + } else if (set->crtc->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->fb == NULL) { DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); @@ -8157,8 +8181,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, } else if (set->fb->pixel_format != set->crtc->fb->pixel_format) { config->mode_changed = true; - } else + } else { config->fb_changed = true; + } } if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) @@ -8332,11 +8357,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set) ret = intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb); - if (ret) { - DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", - set->crtc->base.id, ret); - goto fail; - } } else if (config->fb_changed) { intel_crtc_wait_for_pending_flips(set->crtc); @@ -8344,18 +8364,18 @@ static int intel_crtc_set_config(struct drm_mode_set *set) set->x, set->y, set->fb); } - intel_set_config_free(config); - - return 0; - + if (ret) { + DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", + set->crtc->base.id, ret); fail: - 
intel_set_config_restore_state(dev, config); + intel_set_config_restore_state(dev, config); - /* Try to restore the config */ - if (config->mode_changed && - intel_set_mode(save_set.crtc, save_set.mode, - save_set.x, save_set.y, save_set.fb)) - DRM_ERROR("failed to restore config after modeset failure\n"); + /* Try to restore the config */ + if (config->mode_changed && + intel_set_mode(save_set.crtc, save_set.mode, + save_set.x, save_set.y, save_set.fb)) + DRM_ERROR("failed to restore config after modeset failure\n"); + } out_config: intel_set_config_free(config); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fb2fbc1e08b9..70789b1b5642 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -303,7 +303,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) if (has_aux_irq) done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, - msecs_to_jiffies(10)); + msecs_to_jiffies_timeout(10)); else done = wait_for_atomic(C, 10) == 0; if (!done) @@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = min_t(int, 8*3, pipe_config->pipe_bpp); + if (is_edp(intel_dp) && dev_priv->edp.bpp) + bpp = min_t(int, bpp, dev_priv->edp.bpp); + for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(target_clock, bpp); @@ -739,6 +742,7 @@ found: intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + pipe_config->pipe_bpp = bpp; pipe_config->pixel_target_clock = target_clock; DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", @@ -751,20 +755,6 @@ found: target_clock, adjusted_mode->clock, &pipe_config->dp_m_n); - /* - * XXX: We have a strange regression where using the vbt edp bpp value - * for the link bw computation results in black screens, the panel only - * works when we do the computation at the usual 24bpp (but still - * requires us to use 18bpp). Until that's fully debugged, stay - * bug-for-bug compatible with the old code. 
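/*
 * The eDP change above clamps the starting bpp to the panel's VBT limit
 * before the step-down loop instead of overriding the chosen value after the
 * fact.  Minimal shape of that search, with the link-bandwidth test stubbed
 * out and all names hypothetical:
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

static bool foo_link_can_carry(int bpp)
{
        return bpp <= 18;                /* stand-in for the bandwidth check */
}

static int foo_pick_bpp(int pipe_bpp, int panel_bpp)
{
        int bpp = min_t(int, 8 * 3, pipe_bpp);   /* start at 24 bpp at most */

        if (panel_bpp)                           /* panel reports a limit */
                bpp = min_t(int, bpp, panel_bpp);

        for (; bpp >= 6 * 3; bpp -= 2 * 3)       /* try 24 bpp, then 18 bpp */
                if (foo_link_can_carry(bpp))
                        return bpp;

        return -EINVAL;
}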
- */ - if (is_edp(intel_dp) && dev_priv->edp.bpp) { - DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", - bpp, dev_priv->edp.bpp); - bpp = min_t(int, bpp, dev_priv->edp.bpp); - } - pipe_config->pipe_bpp = bpp; - return true; } @@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); } @@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; int ret; - uint32_t temp; if (HAS_DDI(dev)) { - temp = I915_READ(DP_TP_CTL(port)); + uint32_t temp = I915_READ(DP_TP_CTL(port)); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { case DP_TRAINING_PATTERN_DISABLE: - - if (port != PORT_A) { - temp |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(DP_TP_CTL(port), temp); - - if (wait_for((I915_READ(DP_TP_STATUS(port)) & - DP_TP_STATUS_IDLE_DONE), 1)) - DRM_ERROR("Timed out waiting for DP idle patterns\n"); - - temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - } - temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; break; @@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, return true; } +static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + uint32_t val; + + if (!HAS_DDI(dev)) + return; + + val = I915_READ(DP_TP_CTL(port)); + val &= ~DP_TP_CTL_LINK_TRAIN_MASK; + val |= DP_TP_CTL_LINK_TRAIN_IDLE; + I915_WRITE(DP_TP_CTL(port), val); + + /* + * On PORT_A we can have only eDP in SST mode. There the only reason + * we need to set idle transmission mode is to work around a HW issue + * where we enable the pipe while not in idle link-training mode. + * In this case there is requirement to wait for a minimum number of + * idle patterns to be sent. + */ + if (port == PORT_A) + return; + + if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), + 1)) + DRM_ERROR("Timed out waiting for DP idle patterns\n"); +} + /* Enable corresponding port and start training pattern 1 */ void intel_dp_start_link_train(struct intel_dp *intel_dp) @@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } + intel_dp_set_idle_link_train(intel_dp); + + intel_dp->DP = DP; + if (channel_eq) DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); - intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); +} + +void intel_dp_stop_link_train(struct intel_dp *intel_dp) +{ + intel_dp_set_link_train(intel_dp, intel_dp->DP, + DP_TRAINING_PATTERN_DISABLE); } static void @@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) drm_get_encoder_name(&intel_encoder->base)); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); } } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b5b6d19e6dd3..624a9e6b8d71 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, extern void intel_dp_init_link_config(struct intel_dp *intel_dp); extern void intel_dp_start_link_train(struct intel_dp *intel_dp); extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); +extern void intel_dp_stop_link_train(struct intel_dp *intel_dp); extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); extern void intel_dp_check_link_status(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 0e19e575a1b4..6b7c3ca2c035 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev) void intel_fbdev_set_suspend(struct drm_device *dev, int state) { drm_i915_private_t *dev_priv = dev->dev_private; - if (!dev_priv->fbdev) + struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct fb_info *info; + + if (!ifbdev) return; - fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); + info = ifbdev->helper.fbdev; + + /* On resume from hibernation: If the object is shmemfs backed, it has + * been restored from swap. If the object is stolen however, it will be + * full of whatever garbage was left in there. + */ + if (!state && ifbdev->ifb.obj->stolen) + memset_io(info->screen_base, 0, info->screen_size); + + fb_set_suspend(info, state); } MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 5d245031e391..639fe192997c 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -228,7 +228,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, * need to wake up periodically and check that ourselves. */ I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en); - for (i = 0; i < msecs_to_jiffies(50) + 1; i++) { + for (i = 0; i < msecs_to_jiffies_timeout(50); i++) { prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, TASK_UNINTERRUPTIBLE); @@ -263,7 +263,8 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) /* Important: The hw handles only the first bit, so set only one! 
*/ I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN); - ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); + ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + msecs_to_jiffies_timeout(10)); I915_WRITE(GMBUS4 + reg_offset, 0); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f36f1baabd5a..29412cc89c7a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -815,10 +815,10 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, - .ident = "Hewlett-Packard HP t5740e Thin Client", + .ident = "Hewlett-Packard HP t5740", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"), + DMI_MATCH(DMI_PRODUCT_NAME, " t5740"), }, }, { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index de3b0dc5658b..aa01128ff192 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev) vlv_update_drain_latency(dev); - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &valleyview_wm_info, latency_ns, &valleyview_cursor_wm_info, latency_ns, &planea_wm, &cursora_wm)) - enabled |= 1; + enabled |= 1 << PIPE_A; - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &valleyview_wm_info, latency_ns, &valleyview_cursor_wm_info, latency_ns, &planeb_wm, &cursorb_wm)) - enabled |= 2; + enabled |= 1 << PIPE_B; if (single_plane_enabled(enabled) && g4x_compute_srwm(dev, ffs(enabled) - 1, @@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev) int plane_sr, cursor_sr; unsigned int enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &g4x_wm_info, latency_ns, &g4x_cursor_wm_info, latency_ns, &planea_wm, &cursora_wm)) - enabled |= 1; + enabled |= 1 << PIPE_A; - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &g4x_wm_info, latency_ns, &g4x_cursor_wm_info, latency_ns, &planeb_wm, &cursorb_wm)) - enabled |= 2; + enabled |= 1 << PIPE_B; if (single_plane_enabled(enabled) && g4x_compute_srwm(dev, ffs(enabled) - 1, @@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &ironlake_display_wm_info, ILK_LP0_PLANE_LATENCY, &ironlake_cursor_wm_info, @@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1; + enabled |= 1 << PIPE_A; } - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &ironlake_display_wm_info, ILK_LP0_PLANE_LATENCY, &ironlake_cursor_wm_info, @@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 2; + enabled |= 1 << PIPE_B; } /* @@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1; + enabled |= 1 << PIPE_A; } - if 
(g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 2; + enabled |= 1 << PIPE_B; } /* @@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1; + enabled |= 1 << PIPE_A; } - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 2; + enabled |= 1 << PIPE_B; } - if (g4x_compute_wm0(dev, 2, + if (g4x_compute_wm0(dev, PIPE_C, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe C -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 3; + enabled |= 1 << PIPE_C; } /* diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d15428404b9a..d4ea6c265ce1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1776,11 +1776,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. */ - intel_ddc_get_modes(connector, intel_sdvo->i2c); - if (list_empty(&connector->probed_modes) == false) - goto end; + intel_ddc_get_modes(connector, &intel_sdvo->ddc); - /* Fetch modes from VBT */ + /* + * Fetch modes from VBT. For SDVO prefer the VBT mode since some + * SDVO->LVDS transcoders can't cope with the EDID mode. Since + * drm_mode_probed_add adds the mode at the head of the list we add it + * last. + */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); @@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) } } -end: list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = @@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; } - /* Only enable the hotplug irq if we need it, to work around noisy - * hotplug lines. - */ - if (intel_sdvo->hotplug_active) - intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C; - intel_encoder->compute_config = intel_sdvo_compute_config; intel_encoder->disable = intel_disable_sdvo; intel_encoder->mode_set = intel_sdvo_mode_set; @@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) goto err_output; } + /* Only enable the hotplug irq if we need it, to work around noisy + * hotplug lines. 
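/*
 * The watermark hunks above replace magic constants with "1 << PIPE_x"; the
 * old ivybridge pipe C path even did "enabled |= 3", which marks pipes A and
 * B rather than pipe C.  Tiny illustration of the bitmask convention (the
 * foo_* enum is an assumption, not i915's PIPE_* definition):
 */
#include <linux/types.h>

enum foo_pipe { FOO_PIPE_A, FOO_PIPE_B, FOO_PIPE_C };

static unsigned int foo_mark_enabled(unsigned int enabled, enum foo_pipe pipe)
{
        return enabled | (1u << pipe);           /* one bit per pipe */
}

static bool foo_single_pipe_enabled(unsigned int enabled)
{
        return enabled && !(enabled & (enabled - 1));  /* exactly one bit set */
}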
+ */ + if (intel_sdvo->hotplug_active) { + intel_encoder->hpd_pin = + intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C; + } + /* * Cloning SDVO with anything is often impossible, since the SDVO * encoder can request a special input timing mode. And even if that's diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index f9889658329b..ee66badc8bb6 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) static inline void mga_wait_vsync(struct mga_device *mdev) { - unsigned int count = 0; + unsigned long timeout = jiffies + HZ/10; unsigned int status = 0; do { status = RREG32(MGAREG_Status); - count++; - } while ((status & 0x08) && (count < 250000)); - count = 0; + } while ((status & 0x08) && time_before(jiffies, timeout)); + timeout = jiffies + HZ/10; status = 0; do { status = RREG32(MGAREG_Status); - count++; - } while (!(status & 0x08) && (count < 250000)); + } while (!(status & 0x08) && time_before(jiffies, timeout)); } static inline void mga_wait_busy(struct mga_device *mdev) { - unsigned int count = 0; + unsigned long timeout = jiffies + HZ; unsigned int status = 0; do { status = RREG8(MGAREG_Status + 2); - count++; - } while ((status & 0x01) && (count < 500000)); + } while ((status & 0x01) && time_before(jiffies, timeout)); } /* @@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_REMHEADCTL_CLKDIS; - WREG_DAC(MGA1064_REMHEADCTL, tmp); + WREG8(DAC_DATA, tmp); /* select PLL Set C */ tmp = RREG8(MGAREG_MEM_MISC_READ); @@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_VREF_CTL); tmp = RREG8(DAC_DATA); tmp &= ~0x04; - WREG_DAC(MGA1064_VREF_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(50); @@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; - WREG_DAC(MGA1064_REMHEADCTL, tmp); + WREG8(DAC_DATA, tmp); /* reset dotclock rate bit */ WREG8(MGAREG_SEQ_INDEX, 1); @@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); vcount = RREG8(MGAREG_VCOUNT); @@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; @@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, 
long clock) WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); - WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40); + WREG8(DAC_DATA, tmp & ~0x40); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); @@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); - WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40); + WREG8(DAC_DATA, tmp | 0x40); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3 << 2); @@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); return 0; } @@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; @@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); vcount = RREG8(MGAREG_VCOUNT); @@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_REMHEADCTL_CLKDIS; - WREG_DAC(MGA1064_REMHEADCTL, tmp); + WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3<<2) | 0xc0; @@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc) WREG_DAC(MGA1064_GEN_IO_DATA, tmp); } - +/* + This is how the framebuffer base address is stored in g200 cards: + * Assume @offset is the gpu_addr variable of the framebuffer object + * Then addr is the number of _pixels_ (not bytes) from the start of + VRAM to the first pixel we want to display. 
(divided by 2 for 32bit + framebuffers) + * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers + addr<20> -> CRTCEXT0<6> + addr<19-16> -> CRTCEXT0<3-0> + addr<15-8> -> CRTCC<7-0> + addr<7-0> -> CRTCD<7-0> + CRTCEXT0 has to be programmed last to trigger an update and make the + new addr variable take effect. + */ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) { struct mga_device *mdev = crtc->dev->dev_private; u32 addr; int count; + u8 crtcext0; while (RREG8(0x1fda) & 0x08); while (!(RREG8(0x1fda) & 0x08)); @@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) count = RREG8(MGAREG_VCOUNT) + 2; while (RREG8(MGAREG_VCOUNT) < count); - addr = offset >> 2; + WREG8(MGAREG_CRTCEXT_INDEX, 0); + crtcext0 = RREG8(MGAREG_CRTCEXT_DATA); + crtcext0 &= 0xB0; + addr = offset / 8; + /* Can't store addresses any higher than that... + but we also don't have more than 16MB of memory, so it should be fine. */ + WARN_ON(addr > 0x1fffff); + crtcext0 |= (!!(addr & (1<<20)))<<6; WREG_CRT(0x0d, (u8)(addr & 0xff)); WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); - WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf); + WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0); } @@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, for (i = 0; i < sizeof(dacvalue); i++) { - if ((i <= 0x03) || - (i == 0x07) || - (i == 0x0b) || - (i == 0x0f) || - ((i >= 0x13) && (i <= 0x17)) || + if ((i <= 0x17) || (i == 0x1b) || (i == 0x1c) || ((i >= 0x1f) && (i <= 0x29)) || @@ -1020,13 +1034,14 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, else hi_pri_lvl = 5; - WREG8(0x1fde, 0x06); - WREG8(0x1fdf, hi_pri_lvl); + WREG8(MGAREG_CRTCEXT_INDEX, 0x06); + WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl); } else { + WREG8(MGAREG_CRTCEXT_INDEX, 0x06); if (mdev->reg_1e24 >= 0x01) - WREG8(0x1fdf, 0x03); + WREG8(MGAREG_CRTCEXT_DATA, 0x03); else - WREG8(0x1fdf, 0x04); + WREG8(MGAREG_CRTCEXT_DATA, 0x04); } } return 0; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index 955af122c3a6..a36e64e98ef3 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -138,7 +138,6 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; break; case 0xce: @@ -225,7 +224,6 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; break; case 0xc8: diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c index d0817d94454c..f02fd9f443ff 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c @@ -50,11 +50,16 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) { const u32 doff = (or * 0x800); int load = -EINVAL; + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); - udelay(9500); + mdelay(9); + 
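/*
 * Illustrative sketch, not part of the patch above: the new comment in
 * mga_set_start_address() documents how the start address is scattered
 * across CRTCD, CRTCC and CRTCEXT0.  The helper below only restates that
 * bit layout in one place; the struct and function names are invented for
 * the example and do not exist in the driver.
 */
#include <linux/types.h>

struct mga_start_regs {
	u8 crtcd;	/* addr<7:0>   -> CRTCD<7:0>                              */
	u8 crtcc;	/* addr<15:8>  -> CRTCC<7:0>                              */
	u8 crtcext0;	/* addr<19:16> -> CRTCEXT0<3:0>, addr<20> -> CRTCEXT0<6>  */
};

static struct mga_start_regs mga_split_start_addr(unsigned int offset)
{
	struct mga_start_regs r;
	u32 addr = offset / 8;	/* 8-byte units, matching the patch */

	r.crtcd    = addr & 0xff;
	r.crtcc    = (addr >> 8) & 0xff;
	r.crtcext0 = ((addr >> 16) & 0xf) | ((!!(addr & (1 << 20))) << 6);
	return r;
}

/*
 * Example: offset = 0x00300000 gives addr = 0x60000, so CRTCD = 0x00,
 * CRTCC = 0x00 and CRTCEXT0 carries 0x6 in its low nibble with bit 6
 * clear.  As the patch comment notes, the CRTCEXT0 write has to come
 * last, since that is the write that latches the new address.
 */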
udelay(500); nv_wr32(priv, 0x61a00c + doff, 0x80000000); load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; nv_wr32(priv, 0x61a00c + doff, 0x00000000); + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); return load; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c index 0d36bdc51417..7fdade6e604d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c @@ -55,6 +55,10 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) nv_wr32(priv, 0x616510 + hoff, 0x00000000); nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); + nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ + nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ + nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ + /* ??? */ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index ddaeb5572903..e9b8217d0075 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -40,8 +40,8 @@ * FIFO channel objects ******************************************************************************/ -void -nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) +static void +nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv) { struct nouveau_bar *bar = nouveau_bar(priv); struct nouveau_gpuobj *cur; @@ -62,6 +62,14 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) nv_wr32(priv, 0x002500, 0x00000101); } +void +nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) +{ + mutex_lock(&nv_subdev(priv)->mutex); + nv50_fifo_playlist_update_locked(priv); + mutex_unlock(&nv_subdev(priv)->mutex); +} + static int nv50_fifo_context_attach(struct nouveau_object *parent, struct nouveau_object *object) @@ -487,7 +495,7 @@ nv50_fifo_init(struct nouveau_object *object) for (i = 0; i < 128; i++) nv_wr32(priv, 0x002600 + (i * 4), 0x00000000); - nv50_fifo_playlist_update(priv); + nv50_fifo_playlist_update_locked(priv); nv_wr32(priv, 0x003200, 0x00000001); nv_wr32(priv, 0x003250, 0x00000001); diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index 4d4a6b905370..46dfa68c47bb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -71,6 +71,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) struct nouveau_gpuobj *cur; int i, p; + mutex_lock(&nv_subdev(priv)->mutex); cur = priv->playlist[priv->cur_playlist]; priv->cur_playlist = !priv->cur_playlist; @@ -87,6 +88,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3)); if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000)) nv_error(priv, "playlist update failed\n"); + mutex_unlock(&nv_subdev(priv)->mutex); } static int @@ -248,9 +250,17 @@ nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend) struct nvc0_fifo_priv *priv = (void *)object->engine; struct nvc0_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; + u32 mask, engine; nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000); nvc0_fifo_playlist_update(priv); + mask = nv_rd32(priv, 0x0025a4); + for 
(engine = 0; mask && engine < 16; engine++) { + if (!(mask & (1 << engine))) + continue; + nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000); + mask &= ~(1 << engine); + } nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000); return nouveau_fifo_channel_fini(&chan->base, suspend); diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 9151919fb831..56192a7242ae 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -94,11 +94,13 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) u32 match = (engine << 16) | 0x00000001; int i, p; + mutex_lock(&nv_subdev(priv)->mutex); cur = engn->playlist[engn->cur_playlist]; if (unlikely(cur == NULL)) { int ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, 0, &cur); if (ret) { + mutex_unlock(&nv_subdev(priv)->mutex); nv_error(priv, "playlist alloc failed\n"); return; } @@ -122,6 +124,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) nv_error(priv, "playlist %d update timeout\n", engine); + mutex_unlock(&nv_subdev(priv)->mutex); } static int diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h index 0a393f7f055f..5a5961b6a6a3 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/class.h +++ b/drivers/gpu/drm/nouveau/core/include/core/class.h @@ -218,7 +218,7 @@ struct nv04_display_class { #define NV50_DISP_DAC_PWR_STATE 0x00000040 #define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 #define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 -#define NV50_DISP_DAC_LOAD 0x0002000c +#define NV50_DISP_DAC_LOAD 0x00020100 #define NV50_DISP_DAC_LOAD_VALUE 0x00000007 #define NV50_DISP_PIOR_MTHD 0x00030000 diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index c300b5e7b670..c434d398d16f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -1940,8 +1940,8 @@ init_zm_mask_add(struct nvbios_init *init) trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add); init->offset += 13; - data = init_rd32(init, addr) & mask; - data |= ((data + add) & ~mask); + data = init_rd32(init, addr); + data = (data & mask) | ((data + add) & ~mask); init_wr32(init, addr, data); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index e4940fb166e8..fb794e997fbc 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c @@ -29,7 +29,6 @@ struct nvc0_ltcg_priv { struct nouveau_ltcg base; u32 part_nr; - u32 part_mask; u32 subp_nr; struct nouveau_mm tags; u32 num_tags; @@ -105,8 +104,6 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) /* wait until it's finished with clearing */ for (p = 0; p < priv->part_nr; ++p) { - if (!(priv->part_mask & (1 << p))) - continue; for (i = 0; i < priv->subp_nr; ++i) nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); } @@ -121,6 +118,8 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) int ret; nv_wr32(priv, 0x17e8d8, priv->part_nr); + if (nv_device(pfb)->card_type >= NV_E0) + nv_wr32(priv, 0x17e000, priv->part_nr); /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ priv->num_tags = 
(pfb->ram.size >> 17) / 4; @@ -167,16 +166,20 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, { struct nvc0_ltcg_priv *priv; struct nouveau_fb *pfb = nouveau_fb(parent); - int ret; + u32 parts, mask; + int ret, i; ret = nouveau_ltcg_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->part_nr = nv_rd32(priv, 0x022438); - priv->part_mask = nv_rd32(priv, 0x022554); - + parts = nv_rd32(priv, 0x022438); + mask = nv_rd32(priv, 0x022554); + for (i = 0; i < parts; i++) { + if (!(mask & (1 << i))) + priv->part_nr++; + } priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7bf22d4a3d96..f17dc2ab03ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -638,17 +638,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, } s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); - if (s->event) { - struct drm_pending_vblank_event *e = s->event; - struct timeval now; - - do_gettimeofday(&now); - e->event.sequence = 0; - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; - list_add_tail(&e->base.link, &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); - } + if (s->event) + drm_send_vblank_event(dev, -1, s->event); list_del(&s->head); if (ps) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 46c152ff0a80..383f4e6ea9d1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -453,18 +453,32 @@ nouveau_do_suspend(struct drm_device *dev) NV_INFO(drm, "evicting buffers...\n"); ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); + NV_INFO(drm, "waiting for kernel channels to go idle...\n"); + if (drm->cechan) { + ret = nouveau_channel_idle(drm->cechan); + if (ret) + return ret; + } + + if (drm->channel) { + ret = nouveau_channel_idle(drm->channel); + if (ret) + return ret; + } + + NV_INFO(drm, "suspending client object trees...\n"); if (drm->fence && nouveau_fence(drm)->suspend) { if (!nouveau_fence(drm)->suspend(drm)) return -ENOMEM; } - NV_INFO(drm, "suspending client object trees...\n"); list_for_each_entry(cli, &drm->clients, head) { ret = nouveau_client_fini(&cli->base, true); if (ret) goto fail_client; } + NV_INFO(drm, "suspending kernel object tree...\n"); ret = nouveau_client_fini(&drm->client.base, true); if (ret) goto fail_client; @@ -514,17 +528,18 @@ nouveau_do_resume(struct drm_device *dev) nouveau_agp_reset(drm); - NV_INFO(drm, "resuming client object trees...\n"); + NV_INFO(drm, "resuming kernel object tree...\n"); nouveau_client_init(&drm->client.base); nouveau_agp_init(drm); + NV_INFO(drm, "resuming client object trees...\n"); + if (drm->fence && nouveau_fence(drm)->resume) + nouveau_fence(drm)->resume(drm); + list_for_each_entry(cli, &drm->clients, head) { nouveau_client_init(&cli->base); } - if (drm->fence && nouveau_fence(drm)->resume) - nouveau_fence(drm)->resume(drm); - nouveau_run_vbios_init(dev); nouveau_pm_resume(dev); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index ebf0a683305e..dd5e01f89f28 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1554,7 +1554,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { 
struct nv50_disp *disp = nv50_disp(encoder->dev); int ret, or = nouveau_encoder(encoder)->or; - u32 load = 0; + u32 load = nouveau_drm(encoder->dev)->vbios.dactestval; + if (load == 0) + load = 340; ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); if (ret || load != 7) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 9c53c25e5201..826586ffbe83 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -649,6 +649,9 @@ static void pdev_shutdown(struct platform_device *device) static int pdev_probe(struct platform_device *device) { + if (omapdss_is_initialized() == false) + return -EPROBE_DEFER; + DBG("%s", device->name); return drm_platform_init(&omap_drm_driver, device); } diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 2f1a57e11140..d6c12796023c 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -4,6 +4,7 @@ config DRM_QXL select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT + select FB_DEFERRED_IO select DRM_KMS_HELPER select DRM_TTM help diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 08b0823c93d5..f86771481317 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -277,7 +277,7 @@ out_unref: return 0; } -static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) +static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) { int irq_num; long addr = qdev->io_base + port; @@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) mutex_lock(&qdev->async_io_mutex); irq_num = atomic_read(&qdev->irq_received_io_cmd); - - if (qdev->last_sent_io_cmd > irq_num) { - ret = wait_event_interruptible(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num); - if (ret) + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + /* 0 is timeout, just bail the "hw" has gone away */ + if (ret <= 0) goto out; irq_num = atomic_read(&qdev->irq_received_io_cmd); } outb(val, addr); qdev->last_sent_io_cmd = irq_num + 1; - ret = wait_event_interruptible(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num); + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); out: + if (ret > 0) + ret = 0; mutex_unlock(&qdev->async_io_mutex); return ret; } @@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port) int ret; restart: - ret = wait_for_io_cmd_user(qdev, val, port); + ret = wait_for_io_cmd_user(qdev, val, port, false); if (ret == -ERESTARTSYS) goto restart; } @@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, mutex_lock(&qdev->update_area_mutex); qdev->ram_header->update_area = *area; qdev->ram_header->update_surface = surface_id; - ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC); + ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true); mutex_unlock(&qdev->update_area_mutex); return ret; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 
fcfd4436ceed..823d29e926ec 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, int inc = 1; qobj = gem_to_qxl_bo(qxl_fb->obj); - if (qxl_fb != qdev->active_user_framebuffer) { - DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n", - __func__, qxl_fb, qdev->active_user_framebuffer); - } + /* if we aren't primary surface ignore this */ + if (!qobj->is_primary) + return 0; + if (!num_clips) { num_clips = 1; clips = &norect; @@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, mode->hdisplay, mode->vdisplay); } - qdev->mode_set = true; return 0; } @@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev, { struct drm_gem_object *obj; struct qxl_framebuffer *qxl_fb; - struct qxl_device *qdev = dev->dev_private; int ret; obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); @@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev, return NULL; } - if (qdev->active_user_framebuffer) { - DRM_INFO("%s: active_user_framebuffer %p -> %p\n", - __func__, - qdev->active_user_framebuffer, qxl_fb); - } - qdev->active_user_framebuffer = qxl_fb; - return &qxl_fb->base; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 52b582c211da..43d06ab28a21 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -255,12 +255,6 @@ struct qxl_device { struct qxl_gem gem; struct qxl_mode_info mode_info; - /* - * last created framebuffer with fb_create - * only used by debugfs dumbppm - */ - struct qxl_framebuffer *active_user_framebuffer; - struct fb_info *fbdev_info; struct qxl_framebuffer *fbdev_qfb; void *ram_physical; @@ -270,7 +264,6 @@ struct qxl_device { struct qxl_ring *cursor_ring; struct qxl_ram_header *ram_header; - bool mode_set; bool primary_created; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 04b64f9cbfdb..a4b71b25fa53 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -151,7 +151,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct qxl_bo *cmd_bo; int release_type; struct drm_qxl_command *commands = - (struct drm_qxl_command *)execbuffer->commands; + (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], sizeof(user_cmd))) @@ -193,7 +193,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, for (i = 0 ; i < user_cmd.relocs_num; ++i) { if (DRM_COPY_FROM_USER(&reloc, - &((struct drm_qxl_reloc *)user_cmd.relocs)[i], + &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], sizeof(reloc))) { qxl_bo_list_unreserve(&reloc_list, true); qxl_release_unreserve(qdev, release); @@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, goto out; if (!qobj->pin_count) { + qxl_ttm_placement_from_domain(qobj, qobj->type); ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, true, false); if (unlikely(ret)) diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 85127ed24cfd..e27ce2a907cf 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -128,12 +128,13 @@ int qxl_device_init(struct qxl_device *qdev, qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0)); qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size); - DRM_DEBUG_KMS("qxl: vram 
%p-%p(%dM %dk), surface %p-%p(%dM %dk)\n", - (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0), + DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n", + (unsigned long long)qdev->vram_base, + (unsigned long long)pci_resource_end(pdev, 0), (int)pci_resource_len(pdev, 0) / 1024 / 1024, (int)pci_resource_len(pdev, 0) / 1024, - (void *)qdev->surfaceram_base, - (void *)pci_resource_end(pdev, 1), + (unsigned long long)qdev->surfaceram_base, + (unsigned long long)pci_resource_end(pdev, 1), (int)qdev->surfaceram_size / 1024 / 1024, (int)qdev->surfaceram_size / 1024); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 6d6fdb3ba0d0..d5df8fd10217 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1811,12 +1811,9 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - radeon_crtc->in_mode_set = true; - /* disable crtc pair power gating before programming */ if (ASIC_IS_DCE6(rdev)) atombios_powergate_crtc(crtc, ATOM_DISABLE); @@ -1827,11 +1824,8 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc) static void atombios_crtc_commit(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, ATOM_DISABLE); - radeon_crtc->in_mode_set = false; } static void atombios_crtc_disable(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 44a7da66e081..8406c8251fbf 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) int atombios_get_encoder_mode(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_connector *connector; struct radeon_connector *radeon_connector; @@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio) + radeon_audio && + !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) return ATOM_ENCODER_MODE_DVI; @@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_HDMIA: default: if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio) + radeon_audio && + !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; @@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; else if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio) + radeon_audio && + !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 105bafb6c29d..0f89ce3d02b9 100644 --- 
a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2343,11 +2343,13 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav u32 crtc_enabled, tmp, frame_count, blackout; int i, j; - save->vga_render_control = RREG32(VGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); + if (!ASIC_IS_NODCE(rdev)) { + save->vga_render_control = RREG32(VGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - /* disable VGA render */ - WREG32(VGA_RENDER_CONTROL, 0); + /* disable VGA render */ + WREG32(VGA_RENDER_CONTROL, 0); + } /* blank the display controllers */ for (i = 0; i < rdev->num_crtc; i++) { crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; @@ -2438,8 +2440,11 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)rdev->mc.vram_start); } - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); + + if (!ASIC_IS_NODCE(rdev)) { + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); + } /* unlock regs and wait for update */ for (i = 0; i < rdev->num_crtc; i++) { @@ -2499,10 +2504,12 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s } } } - /* Unlock vga access */ - WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(VGA_RENDER_CONTROL, save->vga_render_control); + if (!ASIC_IS_NODCE(rdev)) { + /* Unlock vga access */ + WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(VGA_RENDER_CONTROL, save->vga_render_control); + } } void evergreen_mc_program(struct radeon_device *rdev) @@ -3405,8 +3412,8 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); } else { /* size in MB on evergreen/cayman/tn */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; } rdev->mc.visible_vram_size = rdev->mc.aper_size; r700_vram_gtt_location(rdev, &rdev->mc); @@ -4747,6 +4754,12 @@ static int evergreen_startup(struct radeon_device *rdev) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -4916,10 +4929,6 @@ int evergreen_init(struct radeon_device *rdev) if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -4992,8 +5001,7 @@ void evergreen_fini(struct radeon_device *rdev) void evergreen_pcie_gen2_enable(struct radeon_device *rdev) { - u32 link_width_cntl, speed_cntl, mask; - int ret; + u32 link_width_cntl, speed_cntl; if (radeon_pcie_gen2 == 0) return; @@ -5008,11 +5016,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev) if (ASIC_IS_X2(rdev)) return; - ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); - if (ret != 0) - return; - - if (!(mask & DRM_PCIE_SPEED_50)) + if ((rdev->pdev->bus->max_bus_speed != 
PCIE_SPEED_5_0GT) && + (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT)) return; speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index b4ab8ceb1654..ed7c8a768092 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -154,19 +154,18 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - u32 base_rate = 48000; + u32 base_rate = 24000; if (!dig || !dig->afmt) return; - /* XXX: properly calculate this */ /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ - WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff); - WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff); + WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); + WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 7969c0c8ec20..84583302b081 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2025,6 +2025,12 @@ static int cayman_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -2190,10 +2196,6 @@ int cayman_init(struct radeon_device *rdev) if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 4973bff37fec..d0314ecbd7c1 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3869,6 +3869,12 @@ static int r100_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r100_irq_set(rdev); rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -4024,9 +4030,6 @@ int r100_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index c60350e6872d..b9b776f1e582 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1382,6 +1382,12 @@ static int r300_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -1516,9 +1522,6 @@ int r300_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 865e2c9980db..60170ea5e3a2 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ 
b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); for (i = 0; i < nr; ++i) { - if (DRM_COPY_FROM_USER_UNCHECKED + if (DRM_COPY_FROM_USER (&box, &cmdbuf->boxes[n + i], sizeof(box))) { DRM_ERROR("copy cliprect faulted\n"); return -EFAULT; diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 6fce2eb4dd16..4e796ecf9ea4 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -265,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -411,10 +417,6 @@ int r420_init(struct radeon_device *rdev) if (r) { return r; } - r = radeon_irq_kms_init(rdev); - if (r) { - return r; - } /* Memory manager */ r = radeon_bo_init(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index f795a4e092cb..e1aece73b370 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -297,9 +303,6 @@ int r520_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1a08008c978b..0e5341695922 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1046,6 +1046,24 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) return -1; } +uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) +{ + uint32_t r; + + WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); + r = RREG32(R_0028FC_MC_DATA); + WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); + return r; +} + +void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) +{ + WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | + S_0028F8_MC_IND_WR_EN(1)); + WREG32(R_0028FC_MC_DATA, v); + WREG32(R_0028F8_MC_INDEX, 0x7F); +} + static void r600_mc_program(struct radeon_device *rdev) { struct rv515_mc_save save; @@ -1181,6 +1199,8 @@ static int r600_mc_init(struct radeon_device *rdev) { u32 tmp; int chansize, numchan; + uint32_t h_addr, l_addr; + unsigned long long k8_addr; /* Get VRAM informations */ rdev->mc.vram_is_ddr = true; @@ -1221,7 +1241,30 @@ static int r600_mc_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) { rs690_pm_info(rdev); rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + + if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { + /* Use K8 direct mapping for fast fb access. */ + rdev->fastfb_working = false; + h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL)); + l_addr = RREG32_MC(R_000011_K8_FB_LOCATION); + k8_addr = ((unsigned long long)h_addr) << 32 | l_addr; +#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) + if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL) +#endif + { + /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport + * memory is present. 
+ */ + if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) { + DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", + (unsigned long long)rdev->mc.aper_base, k8_addr); + rdev->mc.aper_base = (resource_size_t)k8_addr; + rdev->fastfb_working = true; + } + } + } } + radeon_update_bandwidth_info(rdev); return 0; } @@ -3202,6 +3245,12 @@ static int r600_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -3356,10 +3405,6 @@ int r600_init(struct radeon_device *rdev) if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -4631,8 +4676,6 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) { u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; u16 link_cntl2; - u32 mask; - int ret; if (radeon_pcie_gen2 == 0) return; @@ -4651,11 +4694,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) if (rdev->family <= CHIP_R600) return; - ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); - if (ret != 0) - return; - - if (!(mask & DRM_PCIE_SPEED_50)) + if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) && + (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT)) return; speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 47f180a79352..456750a0daa5 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -232,7 +232,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 base_rate = 48000; + u32 base_rate = 24000; if (!dig || !dig->afmt) return; @@ -240,7 +240,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. * doesn't matter which one you use. Just use the first one. */ - /* XXX: properly calculate this */ /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE @@ -250,13 +249,13 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* according to the reg specs, this should DCE3.2 only, but in * practice it seems to cover DCE3.0 as well. 
*/ - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50); + WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ } else { /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ - WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) | - AUDIO_DTO_MODULE(clock * 100)); + WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | + AUDIO_DTO_MODULE(clock / 10)); } } diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index acb146c06973..79df558f8c40 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1342,6 +1342,14 @@ #define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */ #define PACKET3_SURFACE_BASE_UPDATE 0x73 +#define R_000011_K8_FB_LOCATION 0x11 +#define R_000012_MC_MISC_UMA_CNTL 0x12 +#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF) +#define R_0028F8_MC_INDEX 0x28F8 +#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) +#define C_0028F8_MC_IND_ADDR 0xFFFFFE00 +#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9) +#define R_0028FC_MC_DATA 0x28FC #define R_008020_GRBM_SOFT_RESET 0x8020 #define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 1442ce765d48..142ce6cc69f5 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1694,6 +1694,7 @@ struct radeon_device { int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ bool audio_enabled; + bool has_uvd; struct r600_audio audio_status; /* audio stuff */ struct notifier_block acpi_nb; /* only one userspace can use Hyperz features or CMASK at a time */ @@ -1838,6 +1839,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \ (rdev->flags & RADEON_IS_IGP)) #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) +#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) /* * BIOS helpers. 
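/*
 * Illustrative arithmetic, not part of the patch: the audio DTO hunks
 * above program [24 MHz / pixel clock] as an exact fraction, with the
 * PHASE register as the numerator and MODULE as the denominator.  The
 * sketch below only demonstrates that arithmetic for the DCE3.2+ register
 * pair; the function name and the 148.5 MHz example clock are made up,
 * and it assumes (as elsewhere in the driver) that the clock argument is
 * the pixel clock in kHz.
 */
#include <linux/types.h>

static inline void audio_dto_ratio(u32 pixel_clock_khz, u32 *phase, u32 *module)
{
	const u32 base_rate = 24000;		/* 24 MHz reference, in kHz   */

	*phase  = base_rate * 100;		/* 2400000                    */
	*module = pixel_clock_khz * 100;	/* 14850000 for 148500 (1080p) */
	/*
	 * 2400000 / 14850000 == 24000 / 148500 == 24 MHz / 148.5 MHz.
	 * The older DCE2/3 path in the same hunk encodes the identical
	 * ratio as 2400 / 14850 via base_rate / 10 and clock / 10.
	 */
}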
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 6417132c50cf..a2802b47ee95 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -122,6 +122,10 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) rdev->mc_rreg = &rs600_mc_rreg; rdev->mc_wreg = &rs600_mc_wreg; } + if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { + rdev->mc_rreg = &rs780_mc_rreg; + rdev->mc_wreg = &rs780_mc_wreg; + } if (rdev->family >= CHIP_R600) { rdev->pciep_rreg = &r600_pciep_rreg; rdev->pciep_wreg = &r600_pciep_wreg; @@ -1935,6 +1939,8 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 2; + rdev->has_uvd = false; + switch (rdev->family) { case CHIP_R100: case CHIP_RV100: @@ -1999,16 +2005,22 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RV635: case CHIP_RV670: rdev->asic = &r600_asic; + if (rdev->family == CHIP_R600) + rdev->has_uvd = false; + else + rdev->has_uvd = true; break; case CHIP_RS780: case CHIP_RS880: rdev->asic = &rs780_asic; + rdev->has_uvd = true; break; case CHIP_RV770: case CHIP_RV730: case CHIP_RV710: case CHIP_RV740: rdev->asic = &rv770_asic; + rdev->has_uvd = true; break; case CHIP_CEDAR: case CHIP_REDWOOD: @@ -2021,11 +2033,13 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 6; rdev->asic = &evergreen_asic; + rdev->has_uvd = true; break; case CHIP_PALM: case CHIP_SUMO: case CHIP_SUMO2: rdev->asic = &sumo_asic; + rdev->has_uvd = true; break; case CHIP_BARTS: case CHIP_TURKS: @@ -2036,27 +2050,37 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 6; rdev->asic = &btc_asic; + rdev->has_uvd = true; break; case CHIP_CAYMAN: rdev->asic = &cayman_asic; /* set num crtcs */ rdev->num_crtc = 6; + rdev->has_uvd = true; break; case CHIP_ARUBA: rdev->asic = &trinity_asic; /* set num crtcs */ rdev->num_crtc = 4; + rdev->has_uvd = true; break; case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + case CHIP_HAINAN: rdev->asic = &si_asic; /* set num crtcs */ - if (rdev->family == CHIP_OLAND) + if (rdev->family == CHIP_HAINAN) + rdev->num_crtc = 0; + else if (rdev->family == CHIP_OLAND) rdev->num_crtc = 2; else rdev->num_crtc = 6; + if (rdev->family == CHIP_HAINAN) + rdev->has_uvd = false; + else + rdev->has_uvd = true; break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 2c87365d345f..a72759ede753 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -347,6 +347,8 @@ extern bool r600_gui_idle(struct radeon_device *rdev); extern void r600_pm_misc(struct radeon_device *rdev); extern void r600_pm_init_profile(struct radeon_device *rdev); extern void rs780_pm_init_profile(struct radeon_device *rdev); +extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg); +extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int r600_get_pcie_lanes(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index fa3c56fba294..061b227dae0c 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -244,24 +244,28 @@ static bool ni_read_disabled_bios(struct radeon_device *rdev) /* enable the rom */ 
WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); - /* Disable VGA mode */ - WREG32(AVIVO_D1VGA_CONTROL, - (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(AVIVO_D2VGA_CONTROL, - (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(AVIVO_VGA_RENDER_CONTROL, - (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); + if (!ASIC_IS_NODCE(rdev)) { + /* Disable VGA mode */ + WREG32(AVIVO_D1VGA_CONTROL, + (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_D2VGA_CONTROL, + (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_VGA_RENDER_CONTROL, + (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); + } WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); r = radeon_read_bios(rdev); /* restore regs */ WREG32(R600_BUS_CNTL, bus_cntl); - WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); - WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); - WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); + if (!ASIC_IS_NODCE(rdev)) { + WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); + WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); + WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); + } WREG32(R600_ROM_CNTL, rom_cntl); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index a8f608903989..189973836cff 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -94,6 +94,7 @@ static const char radeon_family_name[][16] = { "PITCAIRN", "VERDE", "OLAND", + "HAINAN", "LAST", }; @@ -466,23 +467,27 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; + /* required for EFI mode on macbook2,1 which uses an r5xx asic */ if (efi_enabled(EFI_BOOT) && - rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) + (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && + (rdev->family < CHIP_R600)) return false; + if (ASIC_IS_NODCE(rdev)) + goto check_memsize; + /* first check CRTCs */ - if (ASIC_IS_DCE41(rdev)) { + if (ASIC_IS_DCE4(rdev)) { reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (reg & EVERGREEN_CRTC_MASTER_EN) - return true; - } else if (ASIC_IS_DCE4(rdev)) { - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (rdev->num_crtc >= 4) { + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); + } + if (rdev->num_crtc >= 6) { + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + } if (reg & EVERGREEN_CRTC_MASTER_EN) return true; } else if (ASIC_IS_AVIVO(rdev)) { @@ -499,6 +504,7 @@ bool radeon_card_posted(struct radeon_device *rdev) } } +check_memsize: /* then check MEM_SIZE, in case the crtcs are off */ if (rdev->family >= CHIP_R600) reg = RREG32(R600_CONFIG_MEMSIZE); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 
e38fd559f1ab..eb18bb7af1cc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -271,8 +271,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; struct radeon_unpin_work *work; - struct drm_pending_vblank_event *e; - struct timeval now; unsigned long flags; u32 update_pending; int vpos, hpos; @@ -328,14 +326,9 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) radeon_crtc->unpin_work = NULL; /* wakeup userspace */ - if (work->event) { - e = work->event; - e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; - list_add_tail(&e->base.link, &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); - } + if (work->event) + drm_send_vblank_event(rdev->ddev, crtc_id, work->event); + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index d33f484ace48..094e7e5ea39e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {} #endif int radeon_no_wb; -int radeon_modeset = 1; +int radeon_modeset = -1; int radeon_dynclks = -1; int radeon_r4xx_atom = 0; int radeon_agpmode = 0; @@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = { static int __init radeon_init(void) { +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force() && radeon_modeset == -1) { + DRM_INFO("VGACON disable radeon kernel modesetting.\n"); + radeon_modeset = 0; + } +#endif + /* set to modesetting by default if not nomodeset */ + if (radeon_modeset == -1) + radeon_modeset = 1; + if (radeon_modeset == 1) { DRM_INFO("radeon kernel modesetting enabled.\n"); driver = &kms_driver; diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 2d91123f2759..36e9803b077d 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -92,6 +92,7 @@ enum radeon_family { CHIP_PITCAIRN, CHIP_VERDE, CHIP_OLAND, + CHIP_HAINAN, CHIP_LAST, }; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 6857cb4efb76..7cb178a34a0f 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1031,11 +1031,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; - radeon_crtc->in_mode_set = true; /* * The hardware wedges sometimes if you reconfigure one CRTC * whilst another is running (see fdo bug #24611). 
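/*
 * Illustrative sketch, not part of the patch: the radeon_display.c hunk
 * above and the earlier nouveau_display.c hunk both drop the open-coded
 * event completion (gettimeofday, list_add_tail, wake_up_interruptible)
 * in favour of the drm_send_vblank_event() helper of that kernel era,
 * which stamps the event with the vblank count and timestamp and wakes
 * the waiting file.  The wrapper name below is invented for the example.
 */
#include <drm/drmP.h>

static void example_finish_page_flip(struct drm_device *dev, int crtc_id,
				     struct drm_pending_vblank_event *event)
{
	/* crtc_id may be -1 when the crtc is unknown, as in the nouveau
	 * hunk; the helper then falls back to a zero sequence and the
	 * current time. */
	if (event)
		drm_send_vblank_event(dev, crtc_id, event);
}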
@@ -1046,7 +1044,6 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; @@ -1057,7 +1054,6 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) if (crtci->enabled) radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); } - radeon_crtc->in_mode_set = false; } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 44e579e75fd0..69ad4fe224c1 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -302,7 +302,6 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; - bool in_mode_set; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 93f760e27a92..6c0ce8915fac 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev) return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", - (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); + (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, rdev->mc.gtt_size >> PAGE_SHIFT); if (r) { diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 73051ce3121e..233a9b9fa1f7 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -417,6 +417,12 @@ static int rs400_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -535,9 +541,6 @@ int rs400_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 46fa1b07c560..670b555d2ca2 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -923,6 +923,12 @@ static int rs600_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -1047,9 +1053,6 @@ int rs600_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index ab4c86cfd552..55880d5962c3 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -651,6 +651,12 @@ static int rs690_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -776,9 +782,6 @@ int rs690_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; /* Memory manager */ r = radeon_bo_init(rdev); 
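/*
 * Illustrative note, not part of the patch: the same two-part hunk recurs
 * for every radeon ASIC in this series -- radeon_irq_kms_init() is removed
 * from the one-time *_init() path and called from *_startup() instead,
 * guarded by rdev->irq.installed so that startup running again (it is also
 * invoked on resume) installs the IRQ handler only once.  A minimal sketch
 * of that guard, assuming the driver's radeon.h context; the function name
 * is invented for the example.
 */
static int example_asic_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->irq.installed) {	/* first successful startup only */
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}
	/* ... the rest of the startup sequence is unchanged ... */
	return 0;
}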
if (r) diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ffcba730c57c..21c7d7b26e55 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -532,6 +532,12 @@ static int rv515_startup(struct radeon_device *rdev) } /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -662,9 +668,6 @@ int rv515_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 83f612a9500b..4a62ad2e5399 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -862,10 +862,8 @@ int rv770_uvd_resume(struct radeon_device *rdev) chip_id = 0x0100000b; break; case CHIP_SUMO: - chip_id = 0x0100000c; - break; case CHIP_SUMO2: - chip_id = 0x0100000d; + chip_id = 0x0100000c; break; case CHIP_PALM: chip_id = 0x0100000e; @@ -1889,6 +1887,12 @@ static int rv770_startup(struct radeon_device *rdev) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -2047,10 +2051,6 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -2113,8 +2113,6 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev) { u32 link_width_cntl, lanes, speed_cntl, tmp; u16 link_cntl2; - u32 mask; - int ret; if (radeon_pcie_gen2 == 0) return; @@ -2129,11 +2127,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev) if (ASIC_IS_X2(rdev)) return; - ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); - if (ret != 0) - return; - - if (!(mask & DRM_PCIE_SPEED_50)) + if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) && + (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT)) return; DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index f0b6c2f87c4d..a1b0da6b5808 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -60,6 +60,11 @@ MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); MODULE_FIRMWARE("radeon/OLAND_mc.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); +MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); +MODULE_FIRMWARE("radeon/HAINAN_me.bin"); +MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); +MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); +MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); @@ -265,6 +270,40 @@ static const u32 oland_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; +static const u32 hainan_golden_registers[] = +{ + 0x9a10, 0x00010000, 0x00018208, + 0x9830, 0xffffffff, 0x00000000, + 0x9834, 0xf00fffff, 0x00000400, + 0x9838, 0x0002021c, 0x00020200, + 0xd0c0, 0xff000fff, 0x00000100, + 0xd030, 0x000300c0, 0x00800040, + 0xd8c0, 0xff000fff, 0x00000100, + 0xd830, 0x000300c0, 0x00800040, + 0x2ae4, 0x00073ffe, 0x000022a2, + 0x240c, 
0x000007ff, 0x00000000, + 0x8a14, 0xf000001f, 0x00000007, + 0x8b24, 0xffffffff, 0x00ffffff, + 0x8b10, 0x0000ff0f, 0x00000000, + 0x28a4c, 0x07ffffff, 0x4e000000, + 0x28350, 0x3f3f3fff, 0x00000000, + 0x30, 0x000000ff, 0x0040, + 0x34, 0x00000040, 0x00004040, + 0x9100, 0x03e00000, 0x03600000, + 0x9060, 0x0000007f, 0x00000020, + 0x9508, 0x00010000, 0x00010000, + 0xac14, 0x000003ff, 0x000000f1, + 0xac10, 0xffffffff, 0x00000000, + 0xac0c, 0xffffffff, 0x00003210, + 0x88d4, 0x0000001f, 0x00000010, + 0x15c0, 0x000c0fc0, 0x000c0400 +}; + +static const u32 hainan_golden_registers2[] = +{ + 0x98f8, 0xffffffff, 0x02010001 +}; + static const u32 tahiti_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, @@ -673,6 +712,83 @@ static const u32 oland_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; +static const u32 hainan_mgcg_cgcg_init[] = +{ + 0xc400, 0xffffffff, 0xfffffffc, + 0x802c, 0xffffffff, 0xe0000000, + 0x9a60, 0xffffffff, 0x00000100, + 0x92a4, 0xffffffff, 0x00000100, + 0xc164, 0xffffffff, 0x00000100, + 0x9774, 0xffffffff, 0x00000100, + 0x8984, 0xffffffff, 0x06000100, + 0x8a18, 0xffffffff, 0x00000100, + 0x92a0, 0xffffffff, 0x00000100, + 0xc380, 0xffffffff, 0x00000100, + 0x8b28, 0xffffffff, 0x00000100, + 0x9144, 0xffffffff, 0x00000100, + 0x8d88, 0xffffffff, 0x00000100, + 0x8d8c, 0xffffffff, 0x00000100, + 0x9030, 0xffffffff, 0x00000100, + 0x9034, 0xffffffff, 0x00000100, + 0x9038, 0xffffffff, 0x00000100, + 0x903c, 0xffffffff, 0x00000100, + 0xad80, 0xffffffff, 0x00000100, + 0xac54, 0xffffffff, 0x00000100, + 0x897c, 0xffffffff, 0x06000100, + 0x9868, 0xffffffff, 0x00000100, + 0x9510, 0xffffffff, 0x00000100, + 0xaf04, 0xffffffff, 0x00000100, + 0xae04, 0xffffffff, 0x00000100, + 0x949c, 0xffffffff, 0x00000100, + 0x802c, 0xffffffff, 0xe0000000, + 0x9160, 0xffffffff, 0x00010000, + 0x9164, 0xffffffff, 0x00030002, + 0x9168, 0xffffffff, 0x00040007, + 0x916c, 0xffffffff, 0x00060005, + 0x9170, 0xffffffff, 0x00090008, + 0x9174, 0xffffffff, 0x00020001, + 0x9178, 0xffffffff, 0x00040003, + 0x917c, 0xffffffff, 0x00000007, + 0x9180, 0xffffffff, 0x00060005, + 0x9184, 0xffffffff, 0x00090008, + 0x9188, 0xffffffff, 0x00030002, + 0x918c, 0xffffffff, 0x00050004, + 0x9190, 0xffffffff, 0x00000008, + 0x9194, 0xffffffff, 0x00070006, + 0x9198, 0xffffffff, 0x000a0009, + 0x919c, 0xffffffff, 0x00040003, + 0x91a0, 0xffffffff, 0x00060005, + 0x91a4, 0xffffffff, 0x00000009, + 0x91a8, 0xffffffff, 0x00080007, + 0x91ac, 0xffffffff, 0x000b000a, + 0x91b0, 0xffffffff, 0x00050004, + 0x91b4, 0xffffffff, 0x00070006, + 0x91b8, 0xffffffff, 0x0008000b, + 0x91bc, 0xffffffff, 0x000a0009, + 0x91c0, 0xffffffff, 0x000d000c, + 0x91c4, 0xffffffff, 0x00060005, + 0x91c8, 0xffffffff, 0x00080007, + 0x91cc, 0xffffffff, 0x0000000b, + 0x91d0, 0xffffffff, 0x000a0009, + 0x91d4, 0xffffffff, 0x000d000c, + 0x9150, 0xffffffff, 0x96940200, + 0x8708, 0xffffffff, 0x00900100, + 0xc478, 0xffffffff, 0x00000080, + 0xc404, 0xffffffff, 0x0020003f, + 0x30, 0xffffffff, 0x0000001c, + 0x34, 0x000f0000, 0x000f0000, + 0x160c, 0xffffffff, 0x00000100, + 0x1024, 0xffffffff, 0x00000100, + 0x20a8, 0xffffffff, 0x00000104, + 0x264c, 0x000c0000, 0x000c0000, + 0x2648, 0x000c0000, 0x000c0000, + 0x2f50, 0x00000001, 0x00000001, + 0x30cc, 0xc0000fff, 0x00000104, + 0xc1e4, 0x00000001, 0x00000001, + 0xd0c0, 0xfffffff0, 0x00000100, + 0xd8c0, 0xfffffff0, 0x00000100 +}; + static u32 verde_pg_init[] = { 0x353c, 0xffffffff, 0x40000, @@ -853,6 +969,17 @@ static void si_init_golden_registers(struct radeon_device *rdev) oland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); break; + case 
CHIP_HAINAN: + radeon_program_register_sequence(rdev, + hainan_golden_registers, + (const u32)ARRAY_SIZE(hainan_golden_registers)); + radeon_program_register_sequence(rdev, + hainan_golden_registers2, + (const u32)ARRAY_SIZE(hainan_golden_registers2)); + radeon_program_register_sequence(rdev, + hainan_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init)); + break; default: break; } @@ -1062,6 +1189,45 @@ static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { {0x0000009f, 0x00a17730} }; +static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { + {0x0000006f, 0x03044000}, + {0x00000070, 0x0480c018}, + {0x00000071, 0x00000040}, + {0x00000072, 0x01000000}, + {0x00000074, 0x000000ff}, + {0x00000075, 0x00143400}, + {0x00000076, 0x08ec0800}, + {0x00000077, 0x040000cc}, + {0x00000079, 0x00000000}, + {0x0000007a, 0x21000409}, + {0x0000007c, 0x00000000}, + {0x0000007d, 0xe8000000}, + {0x0000007e, 0x044408a8}, + {0x0000007f, 0x00000003}, + {0x00000080, 0x00000000}, + {0x00000081, 0x01000000}, + {0x00000082, 0x02000000}, + {0x00000083, 0x00000000}, + {0x00000084, 0xe3f3e4f4}, + {0x00000085, 0x00052024}, + {0x00000087, 0x00000000}, + {0x00000088, 0x66036603}, + {0x00000089, 0x01000000}, + {0x0000008b, 0x1c0a0000}, + {0x0000008c, 0xff010000}, + {0x0000008e, 0xffffefff}, + {0x0000008f, 0xfff3efff}, + {0x00000090, 0xfff3efbf}, + {0x00000094, 0x00101101}, + {0x00000095, 0x00000fff}, + {0x00000096, 0x00116fff}, + {0x00000097, 0x60010000}, + {0x00000098, 0x10010000}, + {0x00000099, 0x00006000}, + {0x0000009a, 0x00001000}, + {0x0000009f, 0x00a07730} +}; + /* ucode loading */ static int si_mc_load_microcode(struct radeon_device *rdev) { @@ -1095,6 +1261,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev) ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; + case CHIP_HAINAN: + io_mc_regs = (u32 *)&hainan_io_mc_regs; + ucode_size = OLAND_MC_UCODE_SIZE; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; } running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; @@ -1198,6 +1369,15 @@ static int si_init_microcode(struct radeon_device *rdev) rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = OLAND_MC_UCODE_SIZE * 4; break; + case CHIP_HAINAN: + chip_name = "HAINAN"; + rlc_chip_name = "HAINAN"; + pfp_req_size = SI_PFP_UCODE_SIZE * 4; + me_req_size = SI_PM4_UCODE_SIZE * 4; + ce_req_size = SI_CE_UCODE_SIZE * 4; + rlc_req_size = SI_RLC_UCODE_SIZE * 4; + mc_req_size = OLAND_MC_UCODE_SIZE * 4; + break; default: BUG(); } @@ -2003,7 +2183,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev) WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if ((rdev->family == CHIP_VERDE) || - (rdev->family == CHIP_OLAND)) { + (rdev->family == CHIP_OLAND) || + (rdev->family == CHIP_HAINAN)) { for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { case 0: /* non-AA compressed depth or any compressed stencil */ @@ -2435,7 +2616,7 @@ static void si_gpu_init(struct radeon_device *rdev) default: rdev->config.si.max_shader_engines = 1; rdev->config.si.max_tile_pipes = 4; - rdev->config.si.max_cu_per_sh = 2; + rdev->config.si.max_cu_per_sh = 5; rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; rdev->config.si.max_texture_channel_caches = 4; @@ -2466,6 +2647,23 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_HAINAN: + rdev->config.si.max_shader_engines = 1; + 
rdev->config.si.max_tile_pipes = 4; + rdev->config.si.max_cu_per_sh = 5; + rdev->config.si.max_sh_per_se = 1; + rdev->config.si.max_backends_per_se = 1; + rdev->config.si.max_texture_channel_caches = 2; + rdev->config.si.max_gprs = 256; + rdev->config.si.max_gs_threads = 16; + rdev->config.si.max_hw_contexts = 8; + + rdev->config.si.sc_prim_fifo_size_frontend = 0x20; + rdev->config.si.sc_prim_fifo_size_backend = 0x40; + rdev->config.si.sc_hiz_tile_fifo_size = 0x30; + rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN; + break; } /* Initialize HDP */ @@ -2559,9 +2757,11 @@ static void si_gpu_init(struct radeon_device *rdev) WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); - WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); - WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); - WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + if (rdev->has_uvd) { + WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + } si_tiling_mode_table_init(rdev); @@ -3304,8 +3504,9 @@ static void si_mc_program(struct radeon_device *rdev) if (radeon_mc_wait_for_idle(rdev)) { dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } - /* Lockout access through VGA aperture*/ - WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); + if (!ASIC_IS_NODCE(rdev)) + /* Lockout access through VGA aperture*/ + WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); /* Update configuration */ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); @@ -3327,9 +3528,11 @@ static void si_mc_program(struct radeon_device *rdev) dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } evergreen_mc_resume(rdev, &save); - /* we need to own VRAM, so turn off the VGA renderer here - * to stop it overwriting our objects */ - rv515_vga_render_disable(rdev); + if (!ASIC_IS_NODCE(rdev)) { + /* we need to own VRAM, so turn off the VGA renderer here + * to stop it overwriting our objects */ + rv515_vga_render_disable(rdev); + } } static void si_vram_gtt_location(struct radeon_device *rdev, @@ -3397,8 +3600,8 @@ static int si_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* size in MB on si */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; rdev->mc.visible_vram_size = rdev->mc.aper_size; si_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -4251,8 +4454,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); WREG32(GRBM_INT_CNTL, 0); - WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + if (rdev->num_crtc >= 2) { + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); @@ -4262,8 +4467,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) WREG32(INT_MASK + 
EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); @@ -4273,21 +4480,22 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - WREG32(DACA_AUTODETECT_INT_CONTROL, 0); - - tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD1_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD2_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD3_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD4_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD5_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD6_INT_CONTROL, tmp); + if (!ASIC_IS_NODCE(rdev)) { + WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } } static int si_irq_init(struct radeon_device *rdev) @@ -4366,7 +4574,7 @@ int si_irq_set(struct radeon_device *rdev) u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; - u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; + u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; @@ -4383,12 +4591,14 @@ int si_irq_set(struct radeon_device *rdev) return 0; } - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + if (!ASIC_IS_NODCE(rdev)) { + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + } dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; @@ -4479,8 +4689,10 @@ int si_irq_set(struct radeon_device *rdev) WREG32(GRBM_INT_CNTL, grbm_int_cntl); - WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); - WREG32(INT_MASK + 
EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); + if (rdev->num_crtc >= 2) { + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); + } if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); @@ -4490,8 +4702,10 @@ int si_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + } if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); @@ -4501,12 +4715,14 @@ int si_irq_set(struct radeon_device *rdev) WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); } - WREG32(DC_HPD1_INT_CONTROL, hpd1); - WREG32(DC_HPD2_INT_CONTROL, hpd2); - WREG32(DC_HPD3_INT_CONTROL, hpd3); - WREG32(DC_HPD4_INT_CONTROL, hpd4); - WREG32(DC_HPD5_INT_CONTROL, hpd5); - WREG32(DC_HPD6_INT_CONTROL, hpd6); + if (!ASIC_IS_NODCE(rdev)) { + WREG32(DC_HPD1_INT_CONTROL, hpd1); + WREG32(DC_HPD2_INT_CONTROL, hpd2); + WREG32(DC_HPD3_INT_CONTROL, hpd3); + WREG32(DC_HPD4_INT_CONTROL, hpd4); + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + } return 0; } @@ -4515,6 +4731,9 @@ static inline void si_irq_ack(struct radeon_device *rdev) { u32 tmp; + if (ASIC_IS_NODCE(rdev)) + return; + rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); @@ -5118,17 +5337,25 @@ static int si_startup(struct radeon_device *rdev) return r; } - r = rv770_uvd_resume(rdev); - if (!r) { - r = radeon_fence_driver_start_ring(rdev, - R600_RING_TYPE_UVD_INDEX); + if (rdev->has_uvd) { + r = rv770_uvd_resume(rdev); + if (!r) { + r = radeon_fence_driver_start_ring(rdev, + R600_RING_TYPE_UVD_INDEX); + if (r) + dev_err(rdev->dev, "UVD fences init error (%d).\n", r); + } if (r) - dev_err(rdev->dev, "UVD fences init error (%d).\n", r); + rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; } - if (r) - rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* Enable IRQ */ + if (!rdev->irq.installed) { + r = radeon_irq_kms_init(rdev); + if (r) + return r; + } + r = si_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -5185,16 +5412,18 @@ static int si_startup(struct radeon_device *rdev) if (r) return r; - ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, - UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); - if (!r) - r = r600_uvd_init(rdev); - if (r) - DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); + if (rdev->has_uvd) { + ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + if (ring->ring_size) { + r = radeon_ring_init(rdev, ring, ring->ring_size, + R600_WB_UVD_RPTR_OFFSET, + UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, + 0, 0xfffff, RADEON_CP_PACKET2); + if (!r) + r = r600_uvd_init(rdev); + if (r) + DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); + } } r = radeon_ib_pool_init(rdev); @@ -5243,8 +5472,10 @@ int 
si_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); - radeon_uvd_suspend(rdev); + if (rdev->has_uvd) { + r600_uvd_rbc_stop(rdev); + radeon_uvd_suspend(rdev); + } si_irq_suspend(rdev); radeon_wb_disable(rdev); si_pcie_gart_disable(rdev); @@ -5308,10 +5539,6 @@ int si_init(struct radeon_device *rdev) if (r) return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -5332,11 +5559,13 @@ int si_init(struct radeon_device *rdev) ring->ring_obj = NULL; r600_ring_init(rdev, ring, 64 * 1024); - r = radeon_uvd_init(rdev); - if (!r) { - ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - ring->ring_obj = NULL; - r600_ring_init(rdev, ring, 4096); + if (rdev->has_uvd) { + r = radeon_uvd_init(rdev); + if (!r) { + ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + } } rdev->ih.ring_obj = NULL; @@ -5384,7 +5613,8 @@ void si_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_uvd_fini(rdev); + if (rdev->has_uvd) + radeon_uvd_fini(rdev); si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 222877ba6cf5..8f2d7d4f9b28 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -28,6 +28,7 @@ #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 +#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 /* discrete uvd clocks */ #define CG_UPLL_FUNC_CNTL 0x634 diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 7dff49ed66e7..99e2034e49cc 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -451,27 +451,16 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) { struct drm_pending_vblank_event *event; struct drm_device *dev = scrtc->crtc.dev; - struct timeval vblanktime; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); event = scrtc->event; scrtc->event = NULL; + if (event) { + drm_send_vblank_event(dev, 0, event); + drm_vblank_put(dev, 0); + } spin_unlock_irqrestore(&dev->event_lock, flags); - - if (event == NULL) - return; - - event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime); - event->event.tv_sec = vblanktime.tv_sec; - event->event.tv_usec = vblanktime.tv_usec; - - spin_lock_irqsave(&dev->event_lock, flags); - list_add_tail(&event->base.link, &event->base.file_priv->event_list); - wake_up_interruptible(&event->base.file_priv->event_wait); - spin_unlock_irqrestore(&dev->event_lock, flags); - - drm_vblank_put(dev, 0); } static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index e461e9972455..7a4d10106906 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -6,6 +6,7 @@ config DRM_TILCDC select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS select BACKLIGHT_CLASS_DEVICE + select BACKLIGHT_LCD_SUPPORT help Choose this option if you have an TI SoC with LCDC display controller, for example AM33xx in beagle-bone, DA8xx, or diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c index 1e2060324f02..8c04943f82e3 100644 --- 
a/drivers/gpu/host1x/drm/dc.c +++ b/drivers/gpu/host1x/drm/dc.c @@ -1128,11 +1128,6 @@ static int tegra_dc_probe(struct platform_device *pdev) return err; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(&pdev->dev, "failed to get registers\n"); - return -ENXIO; - } - dc->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(dc->regs)) return PTR_ERR(dc->regs); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 8280eba43e14..e25fb9829729 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -217,6 +217,13 @@ config HID_ELECOM ---help--- Support for the ELECOM BM084 (bluetooth mouse). +config HID_ELO + tristate "ELO USB 4000/4500 touchscreen" + depends on USB_HID + ---help--- + Support for the ELO USB 4000/4500 touchscreens. Note that this is for + different devices than those handled by CONFIG_TOUCHSCREEN_USB_ELO. + config HID_EZKEY tristate "Ezkey BTC 8193 keyboard" if EXPERT depends on HID @@ -231,6 +238,9 @@ config HID_HOLTEK Support for Holtek based devices: - Holtek On Line Grip based game controller - Trust GXT 18 Gaming Keyboard + - Sharkoon Drakonia / Perixx MX-2000 gaming mice + - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / + Zalman ZM-GM1 config HOLTEK_FF bool "Holtek On Line Grip force feedback support" @@ -240,6 +250,12 @@ config HOLTEK_FF Say Y here if you have a Holtek On Line Grip based game controller and want to have force feedback support for it. +config HID_HUION + tristate "Huion tablets" + depends on USB_HID + ---help--- + Support for Huion 580 tablet. + config HID_KEYTOUCH tristate "Keytouch HID devices" depends on HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 833abcf19d68..ffd0d9826d84 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -48,10 +48,13 @@ obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o obj-$(CONFIG_HID_DRAGONRISE) += hid-dr.o obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o obj-$(CONFIG_HID_ELECOM) += hid-elecom.o +obj-$(CONFIG_HID_ELO) += hid-elo.o obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o obj-$(CONFIG_HID_GYRATION) += hid-gyration.o obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o +obj-$(CONFIG_HID_HOLTEK) += hid-holtek-mouse.o obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o +obj-$(CONFIG_HID_HUION) += hid-huion.o obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o obj-$(CONFIG_HID_ICADE) += hid-icade.o obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index ecd1107a5488..e39dac68063c 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1293,7 +1293,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { ret = hdrv->raw_event(hid, report, data, size); - if (ret != 0) { + if (ret < 0) { ret = ret < 0 ? 
ret : 0; goto unlock; } @@ -1573,6 +1573,8 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, @@ -1584,10 +1586,14 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, @@ -2044,6 +2050,8 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) }, { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) }, { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) }, { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) }, diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c new file mode 100644 index 000000000000..f042a6cf8b18 --- /dev/null +++ b/drivers/hid/hid-elo.c @@ -0,0 +1,273 @@ +/* + * HID driver for ELO usb touchscreen 4000/4500 + * + * Copyright (c) 2013 Jiri Slaby + * + * Data parsing taken from elousb driver by Vojtech Pavlik. + * + * This driver is licensed under the terms of GPLv2. 
+ */ + +#include <linux/hid.h> +#include <linux/input.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/workqueue.h> + +#include "hid-ids.h" + +#define ELO_PERIODIC_READ_INTERVAL HZ +#define ELO_SMARTSET_CMD_TIMEOUT 2000 /* msec */ + +/* Elo SmartSet commands */ +#define ELO_FLUSH_SMARTSET_RESPONSES 0x02 /* Flush all pending smartset responses */ +#define ELO_SEND_SMARTSET_COMMAND 0x05 /* Send a smartset command */ +#define ELO_GET_SMARTSET_RESPONSE 0x06 /* Get a smartset response */ +#define ELO_DIAG 0x64 /* Diagnostics command */ +#define ELO_SMARTSET_PACKET_SIZE 8 + +struct elo_priv { + struct usb_device *usbdev; + struct delayed_work work; + unsigned char buffer[ELO_SMARTSET_PACKET_SIZE]; +}; + +static struct workqueue_struct *wq; +static bool use_fw_quirk = true; +module_param(use_fw_quirk, bool, S_IRUGO); +MODULE_PARM_DESC(use_fw_quirk, "Do periodic pokes for broken M firmwares (default = true)"); + +static void elo_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + struct input_dev *input = hidinput->input; + + set_bit(BTN_TOUCH, input->keybit); + set_bit(ABS_PRESSURE, input->absbit); + input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); +} + +static void elo_process_data(struct input_dev *input, const u8 *data, int size) +{ + int press; + + input_report_abs(input, ABS_X, (data[3] << 8) | data[2]); + input_report_abs(input, ABS_Y, (data[5] << 8) | data[4]); + + press = 0; + if (data[1] & 0x80) + press = (data[7] << 8) | data[6]; + input_report_abs(input, ABS_PRESSURE, press); + + if (data[1] & 0x03) { + input_report_key(input, BTN_TOUCH, 1); + input_sync(input); + } + + if (data[1] & 0x04) + input_report_key(input, BTN_TOUCH, 0); + + input_sync(input); +} + +static int elo_raw_event(struct hid_device *hdev, struct hid_report *report, + u8 *data, int size) +{ + struct hid_input *hidinput; + + if (!(hdev->claimed & HID_CLAIMED_INPUT) || list_empty(&hdev->inputs)) + return 0; + + hidinput = list_first_entry(&hdev->inputs, struct hid_input, list); + + switch (report->id) { + case 0: + if (data[0] == 'T') { /* Mandatory ELO packet marker */ + elo_process_data(hidinput->input, data, size); + return 1; + } + break; + default: /* unknown report */ + /* Unknown report type; pass upstream */ + hid_info(hdev, "unknown report type %d\n", report->id); + break; + } + + return 0; +} + +static int elo_smartset_send_get(struct usb_device *dev, u8 command, + void *data) +{ + unsigned int pipe; + u8 dir; + + if (command == ELO_SEND_SMARTSET_COMMAND) { + pipe = usb_sndctrlpipe(dev, 0); + dir = USB_DIR_OUT; + } else if (command == ELO_GET_SMARTSET_RESPONSE) { + pipe = usb_rcvctrlpipe(dev, 0); + dir = USB_DIR_IN; + } else + return -EINVAL; + + return usb_control_msg(dev, pipe, command, + dir | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, 0, data, ELO_SMARTSET_PACKET_SIZE, + ELO_SMARTSET_CMD_TIMEOUT); +} + +static int elo_flush_smartset_responses(struct usb_device *dev) +{ + return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + ELO_FLUSH_SMARTSET_RESPONSES, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); +} + +static void elo_work(struct work_struct *work) +{ + struct elo_priv *priv = container_of(work, struct elo_priv, work.work); + struct usb_device *dev = priv->usbdev; + unsigned char *buffer = priv->buffer; + int ret; + + ret = elo_flush_smartset_responses(dev); + if (ret < 0) { + dev_err(&dev->dev, "initial FLUSH_SMARTSET_RESPONSES failed, error %d\n", + ret); + goto fail; + } + + /* send Diagnostics command 
*/ + *buffer = ELO_DIAG; + ret = elo_smartset_send_get(dev, ELO_SEND_SMARTSET_COMMAND, buffer); + if (ret < 0) { + dev_err(&dev->dev, "send Diagnostics Command failed, error %d\n", + ret); + goto fail; + } + + /* get the result */ + ret = elo_smartset_send_get(dev, ELO_GET_SMARTSET_RESPONSE, buffer); + if (ret < 0) { + dev_err(&dev->dev, "get Diagnostics Command response failed, error %d\n", + ret); + goto fail; + } + + /* read the ack */ + if (*buffer != 'A') { + ret = elo_smartset_send_get(dev, ELO_GET_SMARTSET_RESPONSE, + buffer); + if (ret < 0) { + dev_err(&dev->dev, "get acknowledge response failed, error %d\n", + ret); + goto fail; + } + } + +fail: + ret = elo_flush_smartset_responses(dev); + if (ret < 0) + dev_err(&dev->dev, "final FLUSH_SMARTSET_RESPONSES failed, error %d\n", + ret); + queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); +} + +/* + * Not all Elo devices need the periodic HID descriptor reads. + * Only firmware version M needs this. + */ +static bool elo_broken_firmware(struct usb_device *dev) +{ + return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d; +} + +static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct elo_priv *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + INIT_DELAYED_WORK(&priv->work, elo_work); + priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); + + hid_set_drvdata(hdev, priv); + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + goto err_free; + } + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + hid_err(hdev, "hw start failed\n"); + goto err_free; + } + + if (elo_broken_firmware(priv->usbdev)) { + hid_info(hdev, "broken firmware found, installing workaround\n"); + queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); + } + + return 0; +err_free: + kfree(priv); + return ret; +} + +static void elo_remove(struct hid_device *hdev) +{ + struct elo_priv *priv = hid_get_drvdata(hdev); + + hid_hw_stop(hdev); + flush_workqueue(wq); + kfree(priv); +} + +static const struct hid_device_id elo_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009), }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030), }, + { } +}; +MODULE_DEVICE_TABLE(hid, elo_devices); + +static struct hid_driver elo_driver = { + .name = "elo", + .id_table = elo_devices, + .probe = elo_probe, + .remove = elo_remove, + .raw_event = elo_raw_event, + .input_configured = elo_input_configured, +}; + +static int __init elo_driver_init(void) +{ + int ret; + + wq = create_singlethread_workqueue("elousb"); + if (!wq) + return -ENOMEM; + + ret = hid_register_driver(&elo_driver); + if (ret) + destroy_workqueue(wq); + + return ret; +} +module_init(elo_driver_init); + +static void __exit elo_driver_exit(void) +{ + hid_unregister_driver(&elo_driver); + destroy_workqueue(wq); +} +module_exit(elo_driver_exit); + +MODULE_AUTHOR("Jiri Slaby <jslaby@suse.cz>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c new file mode 100644 index 000000000000..7e6db3cf46f9 --- /dev/null +++ b/drivers/hid/hid-holtek-mouse.c @@ -0,0 +1,77 @@ +/* + * HID driver for Holtek gaming mice + * Copyright (c) 2013 Christian Ohm + * Heavily inspired by various other HID drivers that adjust the report + * descriptor. 
+*/ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "hid-ids.h" + +/* + * The report descriptor of some Holtek based gaming mice specifies an + * excessively large number of consumer usages (2^15), which is more than + * HID_MAX_USAGES. This prevents proper parsing of the report descriptor. + * + * This driver fixes the report descriptor for: + * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 + * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 + * and Zalman ZM-GM1 + */ + +static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + + if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { + /* Change usage maximum and logical maximum from 0x7fff to + * 0x2fff, so they don't exceed HID_MAX_USAGES */ + switch (hdev->product) { + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067: + if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f + && rdesc[120] == 0xff && rdesc[121] == 0x7f) { + hid_info(hdev, "Fixing up report descriptor\n"); + rdesc[116] = rdesc[121] = 0x2f; + } + break; + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: + if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f + && rdesc[111] == 0xff && rdesc[112] == 0x7f) { + hid_info(hdev, "Fixing up report descriptor\n"); + rdesc[107] = rdesc[112] = 0x2f; + } + break; + } + + } + return rdesc; +} + +static const struct hid_device_id holtek_mouse_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, + USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, + USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, + { } +}; +MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); + +static struct hid_driver holtek_mouse_driver = { + .name = "holtek_mouse", + .id_table = holtek_mouse_devices, + .report_fixup = holtek_mouse_report_fixup, +}; + +module_hid_driver(holtek_mouse_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-huion.c b/drivers/hid/hid-huion.c new file mode 100644 index 000000000000..cbf4da4689ba --- /dev/null +++ b/drivers/hid/hid-huion.c @@ -0,0 +1,177 @@ +/* + * HID driver for Huion devices not fully compliant with HID standard + * + * Copyright (c) 2013 Martin Rusko + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/usb.h> +#include "usbhid/usbhid.h" + +#include "hid-ids.h" + +/* Original Huion 580 report descriptor size */ +#define HUION_580_RDESC_ORIG_SIZE 177 + +/* Fixed Huion 580 report descriptor */ +static __u8 huion_580_rdesc_fixed[] = { + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x02, /* Usage (Pen), */ + 0xA1, 0x01, /* Collection (Application), */ + 0x85, 0x07, /* Report ID (7), */ + 0x09, 0x20, /* Usage (Stylus), */ + 0xA0, /* Collection (Physical), */ + 0x14, /* Logical Minimum (0), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x75, 0x01, /* Report Size (1), */ + 0x09, 0x42, /* Usage (Tip Switch), */ + 0x09, 0x44, /* Usage (Barrel Switch), */ + 0x09, 0x46, /* Usage (Tablet Pick), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x09, 0x32, /* Usage (In Range), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x75, 0x10, /* Report Size (16), */ + 0x95, 0x01, /* Report Count (1), */ + 0xA4, /* Push, */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x65, 0x13, /* Unit (Inch), */ + 0x55, 0xFD, /* Unit Exponent (-3), */ + 0x34, /* Physical Minimum (0), */ + 0x09, 0x30, /* Usage (X), */ + 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */ + 0x26, 0x00, 0x7D, /* Logical Maximum (32000), */ + 0x81, 0x02, /* Input (Variable), */ + 0x09, 0x31, /* Usage (Y), */ + 0x46, 0x88, 0x13, /* Physical Maximum (5000), */ + 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */ + 0x81, 0x02, /* Input (Variable), */ + 0xB4, /* Pop, */ + 0x09, 0x30, /* Usage (Tip Pressure), */ + 0x26, 0xFF, 0x07, /* Logical Maximum (2047), */ + 0x81, 0x02, /* Input (Variable), */ + 0xC0, /* End Collection, */ + 0xC0 /* End Collection */ +}; + +static __u8 *huion_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + switch (hdev->product) { + case USB_DEVICE_ID_HUION_580: + if (*rsize == HUION_580_RDESC_ORIG_SIZE) { + rdesc = huion_580_rdesc_fixed; + *rsize = sizeof(huion_580_rdesc_fixed); + } + break; + } + return rdesc; +} + +/** + * Enable fully-functional tablet mode by reading special string + * descriptor. + * + * @hdev: HID device + * + * The specific string descriptor and data were discovered by sniffing + * the Windows driver traffic. 
+ */ +static int huion_tablet_enable(struct hid_device *hdev) +{ + int rc; + char buf[22]; + + rc = usb_string(hid_to_usb_dev(hdev), 0x64, buf, sizeof(buf)); + if (rc < 0) + return rc; + + return 0; +} + +static int huion_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + int ret; + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + + /* Ignore interfaces 1 (mouse) and 2 (keyboard) for Huion 580 tablet, + * as they are not used + */ + switch (id->product) { + case USB_DEVICE_ID_HUION_580: + if (intf->cur_altsetting->desc.bInterfaceNumber != 0x00) + return -ENODEV; + break; + } + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + goto err; + } + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + hid_err(hdev, "hw start failed\n"); + goto err; + } + + switch (id->product) { + case USB_DEVICE_ID_HUION_580: + ret = huion_tablet_enable(hdev); + if (ret) { + hid_err(hdev, "tablet enabling failed\n"); + goto enabling_err; + } + break; + } + + return 0; +enabling_err: + hid_hw_stop(hdev); +err: + return ret; +} + +static int huion_raw_event(struct hid_device *hdev, struct hid_report *report, + u8 *data, int size) +{ + /* If this is a pen input report then invert the in-range bit */ + if (report->type == HID_INPUT_REPORT && report->id == 0x07 && size >= 2) + data[1] ^= 0x40; + + return 0; +} + +static const struct hid_device_id huion_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, + { } +}; +MODULE_DEVICE_TABLE(hid, huion_devices); + +static struct hid_driver huion_driver = { + .name = "huion", + .id_table = huion_devices, + .probe = huion_probe, + .report_fixup = huion_report_fixup, + .raw_event = huion_raw_event, +}; +module_hid_driver(huion_driver); + +MODULE_AUTHOR("Martin Rusko"); +MODULE_DESCRIPTION("Huion HID driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index aa3fec0d9dc6..713217380b44 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -199,13 +199,11 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, if (desc->bLength == 0) goto cleanup; - input_device->hid_desc = kzalloc(desc->bLength, GFP_ATOMIC); + input_device->hid_desc = kmemdup(desc, desc->bLength, GFP_ATOMIC); if (!input_device->hid_desc) goto cleanup; - memcpy(input_device->hid_desc, desc, desc->bLength); - input_device->report_desc_size = desc->desc[0].wDescriptorLength; if (input_device->report_desc_size == 0) { input_device->dev_info_status = -EINVAL; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 508c0072b630..c5aea29f164f 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -248,6 +248,9 @@ #define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81 #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001 +#define USB_VENDOR_ID_DATA_MODUL 0x7374 +#define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201 + #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a @@ -272,16 +275,15 @@ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E 0x725e #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7262 0x7262 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA 0x72aa #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA 0x72aa +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4 0x72c4 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0 0x72d0 #define 
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224 0x7224 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0 0x72d0 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4 0x72c4 #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 @@ -425,6 +427,9 @@ #define USB_DEVICE_ID_UGCI_FLYING 0x0020 #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 +#define USB_VENDOR_ID_HUION 0x256c +#define USB_DEVICE_ID_HUION_580 0x006e + #define USB_VENDOR_ID_IDEACOM 0x1cb6 #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 @@ -440,6 +445,8 @@ #define USB_VENDOR_ID_HOLTEK_ALT 0x04d9 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 +#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 +#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a #define USB_VENDOR_ID_IMATION 0x0718 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 @@ -447,6 +454,10 @@ #define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615 #define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070 +#define USB_VENDOR_ID_JABRA 0x0b0e +#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412 +#define USB_DEVICE_ID_JABRA_SPEAK_510 0x0420 + #define USB_VENDOR_ID_JESS 0x0c45 #define USB_DEVICE_ID_JESS_YUREX 0x1010 @@ -467,6 +478,7 @@ #define USB_VENDOR_ID_KYE 0x0458 #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 +#define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 945b8158ec4c..7480799e535c 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -354,10 +354,10 @@ static int hidinput_get_battery_property(struct power_supply *psy, dev->battery_report_type); if (ret != 2) { - if (ret >= 0) - ret = -EINVAL; + ret = -ENODATA; break; } + ret = 0; if (dev->battery_min < dev->battery_max && buf[1] >= dev->battery_min && @@ -1042,9 +1042,14 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct /* * Ignore out-of-range values as per HID specification, - * section 5.10 and 6.2.25 + * section 5.10 and 6.2.25. + * + * The logical_minimum < logical_maximum check is done so that we + * don't unintentionally discard values sent by devices which + * don't specify logical min and max. 
*/ if ((field->flags & HID_MAIN_ITEM_VARIABLE) && + (field->logical_minimum < field->logical_maximum) && (value < field->logical_minimum || value > field->logical_maximum)) { dbg_hid("Ignoring out-of-range value %x\n", value); diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index 6af90dbdc3d4..1e2ee2aa84a0 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -314,6 +314,25 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, *rsize = sizeof(easypen_m610x_rdesc_fixed); } break; + case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE: + /* + * the fixup that need to be done: + * - change Usage Maximum in the Comsumer Control + * (report ID 3) to a reasonable value + */ + if (*rsize >= 135 && + /* Usage Page (Consumer Devices) */ + rdesc[104] == 0x05 && rdesc[105] == 0x0c && + /* Usage (Consumer Control) */ + rdesc[106] == 0x09 && rdesc[107] == 0x01 && + /* Usage Maximum > 12287 */ + rdesc[114] == 0x2a && rdesc[116] > 0x2f) { + hid_info(hdev, + "fixing up Genius Gila Gaming Mouse " + "report descriptor\n"); + rdesc[116] = 0x2f; + } + break; } return rdesc; } @@ -407,6 +426,8 @@ static const struct hid_device_id kye_devices[] = { USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, { } }; MODULE_DEVICE_TABLE(hid, kye_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index dc3ae5c56f56..cb0e361d7a4b 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = { static void mt_free_input_name(struct hid_input *hi) { struct hid_device *hdev = hi->report->device; + const char *name = hi->input->name; - if (hi->input->name != hdev->name) - kfree(hi->input->name); + if (name != hdev->name) { + hi->input->name = hdev->name; + kfree(name); + } } static ssize_t mt_show_quirks(struct device *dev, @@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev) struct hid_input *hi; sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); - hid_hw_stop(hdev); - list_for_each_entry(hi, &hdev->inputs, list) mt_free_input_name(hi); + hid_hw_stop(hdev); + kfree(td); hid_set_drvdata(hdev, NULL); } @@ -1108,6 +1111,11 @@ static const struct hid_device_id mt_devices[] = { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) }, + /* Data Modul easyMaxTouch */ + { .driver_data = MT_CLS_DEFAULT, + MT_USB_DEVICE(USB_VENDOR_ID_DATA_MODUL, + USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH) }, + /* eGalax devices (resistive) */ { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, @@ -1117,34 +1125,40 @@ static const struct hid_device_id mt_devices[] = { USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) }, /* eGalax devices (capacitive) */ - { .driver_data = MT_CLS_EGALAX, - MT_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207) }, - { .driver_data = MT_CLS_EGALAX_SERIAL, + { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E) }, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_722A) }, - { .driver_data = 
MT_CLS_EGALAX, + { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) }, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7262) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) }, + { .driver_data = MT_CLS_EGALAX, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA) }, { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0) }, + { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) }, { .driver_data = MT_CLS_EGALAX, @@ -1159,15 +1173,6 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, - { .driver_data = MT_CLS_EGALAX, - HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224) }, - { .driver_data = MT_CLS_EGALAX, - HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0) }, - { .driver_data = MT_CLS_EGALAX, - HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) }, /* Elo TouchSystems IntelliTouch Plus panel */ { .driver_data = MT_CLS_DUAL_CONTACT_ID, diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index b59b3df9ca95..65c4ccfcbd29 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -366,7 +366,7 @@ void roccat_disconnect(int minor) mutex_lock(&devices_lock); devices[minor] = NULL; mutex_unlock(&devices_lock); - + if (device->open) { hid_hw_close(device->hid); wake_up_interruptible(&device->wait); @@ -426,13 +426,23 @@ static int __init roccat_init(void) if (retval < 0) { pr_warn("can't get major number\n"); - return retval; + goto error; } cdev_init(&roccat_cdev, &roccat_ops); - cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES); + retval = cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES); + if (retval < 0) { + pr_warn("cannot add cdev\n"); + goto cleanup_alloc_chrdev_region; + } return 0; + + + cleanup_alloc_chrdev_region: + unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES); + error: + return retval; } static void __exit roccat_exit(void) diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 2b1799a3b212..879b0ed701a3 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -108,6 +108,7 @@ static const struct i2c_hid_cmd hid_reset_cmd = { I2C_HID_CMD(0x01), static const struct i2c_hid_cmd hid_get_report_cmd = { I2C_HID_CMD(0x02) }; static const struct i2c_hid_cmd hid_set_report_cmd = { I2C_HID_CMD(0x03) }; static const struct i2c_hid_cmd hid_set_power_cmd = { I2C_HID_CMD(0x08) }; +static const struct i2c_hid_cmd hid_no_cmd = { .length = 0 }; /* * These definitions are not used here, but are defined by the spec. 
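[Editor's note] The hid-roccat hunk above turns an unchecked cdev_add() into a checked call with a goto-based unwind that releases the character-device region on failure. A compact sketch of that register/unwind idiom is shown below; it is illustrative only -- the identifiers are placeholders, and the alloc_chrdev_region() call is an assumption, since the hunk does not show how dev_id is obtained.

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

#define EXAMPLE_MAX_DEVICES 8	/* placeholder, analogous to ROCCAT_MAX_DEVICES */

static dev_t example_dev_id;
static struct cdev example_cdev;
static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static int __init example_init(void)
{
	int retval;

	/* assumption: the major/minor range is allocated dynamically */
	retval = alloc_chrdev_region(&example_dev_id, 0, EXAMPLE_MAX_DEVICES,
				     "example");
	if (retval < 0)
		return retval;

	cdev_init(&example_cdev, &example_fops);
	retval = cdev_add(&example_cdev, example_dev_id, EXAMPLE_MAX_DEVICES);
	if (retval < 0)
		goto cleanup_alloc_chrdev_region;

	return 0;

cleanup_alloc_chrdev_region:
	/* undo the region allocation when cdev_add() fails */
	unregister_chrdev_region(example_dev_id, EXAMPLE_MAX_DEVICES);
	return retval;
}

The point of the fix is simply that every registration step acquired before the failing call gets released in reverse order, instead of returning with a half-registered char device.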
@@ -259,8 +260,11 @@ static int i2c_hid_set_report(struct i2c_client *client, u8 reportType, { struct i2c_hid *ihid = i2c_get_clientdata(client); u8 *args = ihid->argsbuf; + const struct i2c_hid_cmd * hidcmd = &hid_set_report_cmd; int ret; u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister); + u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister); + u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength); /* hidraw already checked that data_len < HID_MAX_BUFFER_SIZE */ u16 size = 2 /* size */ + @@ -278,8 +282,18 @@ static int i2c_hid_set_report(struct i2c_client *client, u8 reportType, reportID = 0x0F; } - args[index++] = dataRegister & 0xFF; - args[index++] = dataRegister >> 8; + /* + * use the data register for feature reports or if the device does not + * support the output register + */ + if (reportType == 0x03 || maxOutputLength == 0) { + args[index++] = dataRegister & 0xFF; + args[index++] = dataRegister >> 8; + } else { + args[index++] = outputRegister & 0xFF; + args[index++] = outputRegister >> 8; + hidcmd = &hid_no_cmd; + } args[index++] = size & 0xFF; args[index++] = size >> 8; @@ -289,7 +303,7 @@ static int i2c_hid_set_report(struct i2c_client *client, u8 reportType, memcpy(&args[index], buf, data_len); - ret = __i2c_hid_command(client, &hid_set_report_cmd, reportID, + ret = __i2c_hid_command(client, hidcmd, reportID, reportType, args, args_len, NULL, 0); if (ret) { dev_err(&client->dev, "failed to set a report to device.\n"); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index bad8128b283a..21ef68934a20 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -329,7 +329,7 @@ static u32 get_vp_index(uuid_le *type_guid) return 0; } cur_cpu = (++next_vp % max_cpus); - return cur_cpu; + return hv_context.vp_index[cur_cpu]; } /* diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c index df0b69987914..2ebd6ce46108 100644 --- a/drivers/hwmon/abituguru.c +++ b/drivers/hwmon/abituguru.c @@ -1414,14 +1414,18 @@ static int abituguru_probe(struct platform_device *pdev) pr_info("found Abit uGuru\n"); /* Register sysfs hooks */ - for (i = 0; i < sysfs_attr_i; i++) - if (device_create_file(&pdev->dev, - &data->sysfs_attr[i].dev_attr)) + for (i = 0; i < sysfs_attr_i; i++) { + res = device_create_file(&pdev->dev, + &data->sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; - for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) - if (device_create_file(&pdev->dev, - &abituguru_sysfs_attr[i].dev_attr)) + } + for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) { + res = device_create_file(&pdev->dev, + &abituguru_sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; + } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (!IS_ERR(data->hwmon_dev)) diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 7e76922a4ba9..f920619cd6da 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client, man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID); dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID); + if (man_id < 0 || dev_id < 0) + return -ENODEV; + if (man_id == 0x4d && dev_id == 0x01) type_name = "max1617a"; else if (man_id == 0x41) { if ((dev_id & 0xF0) == 0x30) type_name = "adm1023"; - else + else if ((dev_id & 0xF0) == 0x00) type_name = "adm1021"; + else + return -ENODEV; } else if (man_id == 0x49) type_name = "thmc10"; else if (man_id == 0x23) type_name = "gl523sm"; else if (man_id 
== 0x54) type_name = "mc1066"; - /* LM84 Mfr ID in a different place, and it has more unused bits */ - else if (conv_rate == 0x00 - && (config & 0x7F) == 0x00 - && (status & 0xAB) == 0x00) - type_name = "lm84"; - else - type_name = "max1617"; + else { + int lte, rte, lhi, rhi, llo, rlo; + + /* extra checks for LM84 and MAX1617 to avoid misdetections */ + + llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0)); + rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1)); + + /* fail if any of the additional register reads failed */ + if (llo < 0 || rlo < 0) + return -ENODEV; + + lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0)); + rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1)); + lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0)); + rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1)); + + /* + * Fail for negative temperatures and negative high limits. + * This check also catches read errors on the tested registers. + */ + if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0) + return -ENODEV; + + /* fail if all registers hold the same value */ + if (lte == rte && lte == lhi && lte == rhi && lte == llo + && lte == rlo) + return -ENODEV; + + /* + * LM84 Mfr ID is in a different place, + * and it has more unused bits. + */ + if (conv_rate == 0x00 + && (config & 0x7F) == 0x00 + && (status & 0xAB) == 0x00) { + type_name = "lm84"; + } else { + /* fail if low limits are larger than high limits */ + if ((s8)llo > lhi || (s8)rlo > rhi) + return -ENODEV; + type_name = "max1617"; + } + } pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n", type_name, i2c_adapter_id(adapter), client->addr); diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c index aafa4531b961..52b77afebde1 100644 --- a/drivers/hwmon/iio_hwmon.c +++ b/drivers/hwmon/iio_hwmon.c @@ -84,8 +84,10 @@ static int iio_hwmon_probe(struct platform_device *pdev) return PTR_ERR(channels); st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); - if (st == NULL) - return -ENOMEM; + if (st == NULL) { + ret = -ENOMEM; + goto error_release_channels; + } st->channels = channels; @@ -159,7 +161,7 @@ static int iio_hwmon_probe(struct platform_device *pdev) error_remove_group: sysfs_remove_group(&dev->kobj, &st->attr_group); error_release_channels: - iio_channel_release_all(st->channels); + iio_channel_release_all(channels); return ret; } diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index f43f5e571db9..04638aee9039 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -3705,8 +3705,10 @@ static int nct6775_probe(struct platform_device *pdev) data->have_temp |= 1 << i; data->have_temp_fixed |= 1 << i; data->reg_temp[0][i] = reg_temp_alternate[i]; - data->reg_temp[1][i] = reg_temp_over[i]; - data->reg_temp[2][i] = reg_temp_hyst[i]; + if (i < num_reg_temp) { + data->reg_temp[1][i] = reg_temp_over[i]; + data->reg_temp[2][i] = reg_temp_hyst[i]; + } data->temp_src[i] = i + 1; continue; } diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index a478454f690f..dfe6d9527efb 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -240,7 +240,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev) mutex_lock(&data->update_lock); next_update = data->last_updated + - msecs_to_jiffies(data->update_interval) + 1; + msecs_to_jiffies(data->update_interval); if (time_after(jiffies, next_update) || !data->valid) { if (data->kind != tmp432) { /* diff --git a/drivers/i2c/busses/i2c-designware-core.c 
b/drivers/i2c/busses/i2c-designware-core.c index 21fbb340ad66..c41ca6354fc5 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -383,7 +383,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) /* Enable the adapter */ __i2c_dw_enable(dev, true); - /* Enable interrupts */ + /* Clear and enable interrupts */ + i2c_dw_clear_int(dev); dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK); } @@ -448,8 +449,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) cmd |= BIT(9); if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { + + /* avoid rx buffer overrun */ + if (rx_limit - dev->rx_outstanding <= 0) + break; + dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); rx_limit--; + dev->rx_outstanding++; } else dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD); tx_limit--; buf_len--; @@ -502,8 +509,10 @@ i2c_dw_read(struct dw_i2c_dev *dev) rx_valid = dw_readl(dev, DW_IC_RXFLR); - for (; len > 0 && rx_valid > 0; len--, rx_valid--) + for (; len > 0 && rx_valid > 0; len--, rx_valid--) { *buf++ = dw_readl(dev, DW_IC_DATA_CMD); + dev->rx_outstanding--; + } if (len > 0) { dev->status |= STATUS_READ_IN_PROGRESS; @@ -561,6 +570,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) dev->msg_err = 0; dev->status = STATUS_IDLE; dev->abort_source = 0; + dev->rx_outstanding = 0; ret = i2c_dw_wait_bus_not_busy(dev); if (ret < 0) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 9c1840ee09c7..e761ad18dd61 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -60,6 +60,7 @@ * @adapter: i2c subsystem adapter node * @tx_fifo_depth: depth of the hardware tx fifo * @rx_fifo_depth: depth of the hardware rx fifo + * @rx_outstanding: current master-rx elements in tx fifo */ struct dw_i2c_dev { struct device *dev; @@ -88,6 +89,7 @@ struct dw_i2c_dev { u32 master_cfg; unsigned int tx_fifo_depth; unsigned int rx_fifo_depth; + int rx_outstanding; }; #define ACCESS_SWAP 0x00000001 diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 8ec91335d95a..35b70a1edf57 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -69,6 +69,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, { "INT33C3", 0 }, + { "80860F41", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index e1cf2e0e1f23..3a6903f63913 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -231,7 +231,11 @@ static const char *i801_feature_names[] = { static unsigned int disable_features; module_param(disable_features, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(disable_features, "Disable selected driver features"); +MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n" + "\t\t 0x01 disable SMBus PEC\n" + "\t\t 0x02 disable the block buffer\n" + "\t\t 0x08 disable the I2C block read functionality\n" + "\t\t 0x10 don't use interrupts "); /* Make sure the SMBus host is ready to start transmitting. Return 0 if it is, -EBUSY if it is not. 
*/ diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 3bbd65d35a5e..1a3abd6a0bfc 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -252,7 +252,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up_interruptible(&drv_data->waitq); + wake_up(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_CONTINUE: @@ -300,7 +300,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up_interruptible(&drv_data->waitq); + wake_up(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_INVALID: @@ -315,7 +315,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up_interruptible(&drv_data->waitq); + wake_up(&drv_data->waitq); break; } } @@ -381,7 +381,7 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data) unsigned long flags; char abort = 0; - time_left = wait_event_interruptible_timeout(drv_data->waitq, + time_left = wait_event_timeout(drv_data->waitq, !drv_data->block, drv_data->adapter.timeout); spin_lock_irqsave(&drv_data->lock, flags); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 6e8ee92ab553..cab1c91b75a3 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1082,11 +1082,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) /* map the registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "cannot find IO resource\n"); - return -ENOENT; - } - i2c->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(i2c->regs)) diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index 5a7ad240bd26..a63c7d506836 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c @@ -303,12 +303,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) adap->class = I2C_CLASS_HWMON; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (mem_res == NULL) { - dev_err(&pdev->dev, "Unable to get MEM resource\n"); - err = -EINVAL; - goto out; - } - siic->base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(siic->base)) { err = PTR_ERR(siic->base); diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index b60ff90adc39..9aa1b60f7fdd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -714,11 +714,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) int ret = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "no mem resource\n"); - return -EINVAL; - } - base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 6b63cc7eb71e..48e31ed69dbf 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -892,7 +892,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); -static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device); +static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, + i2c_sysfs_delete_device); static struct attribute 
*i2c_adapter_attrs[] = { &dev_attr_name.attr, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 0e8fab1913df..fa6964d8681a 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -273,6 +273,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { .target_residency = 500, .enter = &intel_idle }, { + .name = "C8-HSW", + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 900, + .enter = &intel_idle }, + { + .name = "C9-HSW", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, + .target_residency = 1800, + .enter = &intel_idle }, + { + .name = "C10-HSW", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, + .target_residency = 7700, + .enter = &intel_idle }, + { .enter = NULL } }; diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c index 9f3a8ef1fb3e..b3d03d335948 100644 --- a/drivers/iio/adc/exynos_adc.c +++ b/drivers/iio/adc/exynos_adc.c @@ -390,8 +390,8 @@ static int exynos_adc_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int exynos_adc_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos_adc *info = platform_get_drvdata(pdev); + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct exynos_adc *info = iio_priv(indio_dev); u32 con; if (info->version == ADC_V2) { @@ -413,8 +413,8 @@ static int exynos_adc_suspend(struct device *dev) static int exynos_adc_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos_adc *info = platform_get_drvdata(pdev); + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct exynos_adc *info = iio_priv(indio_dev); int ret; ret = regulator_enable(info->vdd); diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c index 9201022945e9..9d19ba74f22b 100644 --- a/drivers/iio/buffer_cb.c +++ b/drivers/iio/buffer_cb.c @@ -64,7 +64,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, while (chan->indio_dev) { if (chan->indio_dev != indio_dev) { ret = -EINVAL; - goto error_release_channels; + goto error_free_scan_mask; } set_bit(chan->channel->scan_index, cb_buff->buffer.scan_mask); @@ -73,6 +73,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, return cb_buff; +error_free_scan_mask: + kfree(cb_buff->buffer.scan_mask); error_release_channels: iio_channel_release_all(cb_buff->channels); error_free_cb_buff: @@ -100,6 +102,7 @@ EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb); void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff) { + kfree(cb_buff->buffer.scan_mask); iio_channel_release_all(cb_buff->channels); kfree(cb_buff); } diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index bd33473f8e38..ed9bc8ae9330 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -312,6 +312,8 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, goto read_error; *val = *val >> ch->scan_type.shift; + + err = st_sensors_set_enable(indio_dev, false); } mutex_unlock(&indio_dev->mlock); diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index f4a6f0838327..b61160bd935e 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -5,7 +5,7 @@ menu "Digital to 
analog converters" config AD5064 tristate "Analog Devices AD5064 and similar multi-channel DAC driver" - depends on (SPI_MASTER || I2C) + depends on (SPI_MASTER && I2C!=m) || I2C help Say yes here to build support for Analog Devices AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668, @@ -27,7 +27,7 @@ config AD5360 config AD5380 tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver" - depends on (SPI_MASTER || I2C) + depends on (SPI_MASTER && I2C!=m) || I2C select REGMAP_I2C if I2C select REGMAP_SPI if SPI_MASTER help @@ -57,7 +57,7 @@ config AD5624R_SPI config AD5446 tristate "Analog Devices AD5446 and similar single channel DACs driver" - depends on (SPI_MASTER || I2C) + depends on (SPI_MASTER && I2C!=m) || I2C help Say yes here to build support for Analog Devices AD5300, AD5301, AD5310, AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453, diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index a884252ac66b..e76d4ace53ff 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -212,7 +212,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) (pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS | ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N | ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) | - ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x9))); + ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3))); st->regs[ADF4350_REG3] = pdata->r3_user_settings & (ADF4350_REG3_12BIT_CLKDIV(0xFFF) | diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 795d100b4c36..98ddc323add0 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -124,7 +124,7 @@ static int __of_iio_channel_get(struct iio_channel *channel, channel->indio_dev = indio_dev; index = iiospec.args_count ? iiospec.args[0] : 0; if (index >= indio_dev->num_channels) { - return -EINVAL; + err = -EINVAL; goto err_put; } channel->channel = &indio_dev->channels[index]; @@ -450,7 +450,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, s64 raw64 = raw; int ret; - ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE); + ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET); if (ret == 0) raw64 += offset; diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 81c7b73695d2..3b9afccaaade 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) if (dma_region) { struct qib_mregion *tmr; - tmr = rcu_dereference(dev->dma_mr); + tmr = rcu_access_pointer(dev->dma_mr); if (!tmr) { qib_get_mr(mr); rcu_assign_pointer(dev->dma_mr, mr); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index f19b0998a53c..2e84ef859c5b 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -5,6 +5,7 @@ * Copyright (C) 2004 Alex Aizman * Copyright (C) 2005 Mike Christie * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013 Mellanox Technologies. All rights reserved. 
* maintained by openib-general@openib.org * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 06f578cde75b..4f069c0d4c04 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -8,6 +8,7 @@ * * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. + * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index a00ccd1ca333..b6d81a86c976 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 68ebb7fe072a..7827baf455a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 5278916c3103..2c4941d0656b 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. + * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -292,10 +293,10 @@ out_err: } /** - * releases the FMR pool, QP and CMA ID objects, returns 0 on success, + * releases the FMR pool and QP objects, returns 0 on success, * -1 on failure */ -static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) +static int iser_free_ib_conn_res(struct iser_conn *ib_conn) { int cq_index; BUG_ON(ib_conn == NULL); @@ -314,13 +315,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) rdma_destroy_qp(ib_conn->cma_id); } - /* if cma handler context, the caller acts s.t the cma destroy the id */ - if (ib_conn->cma_id != NULL && can_destroy_id) - rdma_destroy_id(ib_conn->cma_id); ib_conn->fmr_pool = NULL; ib_conn->qp = NULL; - ib_conn->cma_id = NULL; kfree(ib_conn->page_vec); if (ib_conn->login_buf) { @@ -415,11 +412,16 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) list_del(&ib_conn->conn_list); mutex_unlock(&ig.connlist_mutex); iser_free_rx_descriptors(ib_conn); - iser_free_ib_conn_res(ib_conn, can_destroy_id); + iser_free_ib_conn_res(ib_conn); ib_conn->device = NULL; /* on EVENT_ADDR_ERROR there's no device yet for this conn */ if (device != NULL) iser_device_try_release(device); + /* if cma handler context, the caller actually destroy the id */ + if (ib_conn->cma_id != NULL && can_destroy_id) { + rdma_destroy_id(ib_conn->cma_id); + ib_conn->cma_id = NULL; + } iscsi_destroy_endpoint(ib_conn->ep); } diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index b08ca7a9f76b..3f3f0416fbdd 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2227,6 +2227,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch) } /** + * srpt_shutdown_session() - Whether or not a session may be shut down. + */ +static int srpt_shutdown_session(struct se_session *se_sess) +{ + struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; + unsigned long flags; + + spin_lock_irqsave(&ch->spinlock, flags); + if (ch->in_shutdown) { + spin_unlock_irqrestore(&ch->spinlock, flags); + return true; + } + + ch->in_shutdown = true; + target_sess_cmd_list_set_waiting(se_sess); + spin_unlock_irqrestore(&ch->spinlock, flags); + + return true; +} + +/** * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. * @cm_id: Pointer to the CM ID of the channel to be drained. * @@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id) spin_unlock_irq(&sdev->spinlock); if (do_reset) { + if (ch->sess) + srpt_shutdown_session(ch->sess); + ret = srpt_ch_qp_err(ch); if (ret < 0) printk(KERN_ERR "Setting queue pair in error state" @@ -2328,7 +2352,7 @@ static void srpt_release_channel_work(struct work_struct *w) se_sess = ch->sess; BUG_ON(!se_sess); - target_wait_for_sess_cmds(se_sess, 0); + target_wait_for_sess_cmds(se_sess); transport_deregister_session_configfs(se_sess); transport_deregister_session(se_sess); @@ -3467,14 +3491,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) } /** - * srpt_shutdown_session() - Whether or not a session may be shut down. - */ -static int srpt_shutdown_session(struct se_session *se_sess) -{ - return true; -} - -/** * srpt_close_session() - Forcibly close a session. 
* * Callback function invoked by the TCM core to clean up sessions associated diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index 4caf55cda7b1..3dae156905de 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -325,6 +325,7 @@ struct srpt_rdma_ch { u8 sess_name[36]; struct work_struct release_work; struct completion *release_done; + bool in_shutdown; }; /** diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 2f78538e09d0..b2420ae19e14 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -1379,6 +1379,7 @@ static int synaptics_reconnect(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; struct synaptics_data old_priv = *priv; + unsigned char param[2]; int retry = 0; int error; @@ -1394,6 +1395,7 @@ static int synaptics_reconnect(struct psmouse *psmouse) */ ssleep(1); } + ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETID); error = synaptics_detect(psmouse, 0); } while (error && ++retry < 3); diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 0bfd8cf25200..518282da6d85 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -342,10 +342,10 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) | ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12); - switch (wacom->id[idx] & 0xfffff) { + switch (wacom->id[idx]) { case 0x812: /* Inking pen */ case 0x801: /* Intuos3 Inking pen */ - case 0x20802: /* Intuos4 Inking Pen */ + case 0x120802: /* Intuos4/5 Inking Pen */ case 0x012: wacom->tool[idx] = BTN_TOOL_PENCIL; break; @@ -356,11 +356,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x823: /* Intuos3 Grip Pen */ case 0x813: /* Intuos3 Classic Pen */ case 0x885: /* Intuos3 Marker Pen */ - case 0x802: /* Intuos4 General Pen */ - case 0x804: /* Intuos4 Marker Pen */ - case 0x40802: /* Intuos4 Classic Pen */ - case 0x18802: /* DTH2242 Grip Pen */ + case 0x802: /* Intuos4/5 13HD/24HD General Pen */ + case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */ case 0x022: + case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */ + case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ + case 0x160802: /* Cintiq 13HD Pro Pen */ + case 0x180802: /* DTH2242 Pen */ wacom->tool[idx] = BTN_TOOL_PEN; break; @@ -391,10 +393,14 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x82b: /* Intuos3 Grip Pen Eraser */ case 0x81b: /* Intuos3 Classic Pen Eraser */ case 0x91b: /* Intuos3 Airbrush Eraser */ - case 0x80c: /* Intuos4 Marker Pen Eraser */ - case 0x80a: /* Intuos4 General Pen Eraser */ - case 0x4080a: /* Intuos4 Classic Pen Eraser */ - case 0x90a: /* Intuos4 Airbrush Eraser */ + case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */ + case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */ + case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ + case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */ + case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ + case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ + case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ + case 0x18080a: /* DTH2242 Eraser */ wacom->tool[idx] = BTN_TOOL_RUBBER; break; @@ -402,7 +408,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x912: case 0x112: case 0x913: /* Intuos3 Airbrush */ - case 0x902: /* Intuos4 Airbrush */ + case 0x902: /* Intuos4/5 13HD/24HD Airbrush */ + case 0x100902: /* Intuos4/5 
13HD/24HD Airbrush */ wacom->tool[idx] = BTN_TOOL_AIRBRUSH; break; @@ -533,10 +540,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_8, (data[3] & 0x80)); } if (data[1] | (data[2] & 0x01) | data[3]) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else if (features->type == DTK) { @@ -546,6 +551,26 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_3, (data[6] & 0x08)); input_report_key(input, BTN_4, (data[6] & 0x10)); input_report_key(input, BTN_5, (data[6] & 0x20)); + if (data[6] & 0x3f) { + input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); + } else { + input_report_abs(input, ABS_MISC, 0); + } + } else if (features->type == WACOM_13HD) { + input_report_key(input, BTN_0, (data[3] & 0x01)); + input_report_key(input, BTN_1, (data[4] & 0x01)); + input_report_key(input, BTN_2, (data[4] & 0x02)); + input_report_key(input, BTN_3, (data[4] & 0x04)); + input_report_key(input, BTN_4, (data[4] & 0x08)); + input_report_key(input, BTN_5, (data[4] & 0x10)); + input_report_key(input, BTN_6, (data[4] & 0x20)); + input_report_key(input, BTN_7, (data[4] & 0x40)); + input_report_key(input, BTN_8, (data[4] & 0x80)); + if ((data[3] & 0x01) | data[4]) { + input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); + } else { + input_report_abs(input, ABS_MISC, 0); + } } else if (features->type == WACOM_24HD) { input_report_key(input, BTN_0, (data[6] & 0x01)); input_report_key(input, BTN_1, (data[6] & 0x02)); @@ -590,10 +615,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) { @@ -618,10 +641,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } if (data[2] | (data[3] & 0x01) | data[4] | data[5]) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else { @@ -668,10 +689,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) | data[2] | (data[3] & 0x1f) | data[4] | data[8] | (data[7] & 0x01)) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } @@ -1301,6 +1320,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) case INTUOS4L: case CINTIQ: case WACOM_BEE: + case WACOM_13HD: case WACOM_21UX2: case WACOM_22HD: case WACOM_24HD: @@ -1530,15 +1550,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(KEY_PROG1, input_dev->keybit); __set_bit(KEY_PROG2, input_dev->keybit); __set_bit(KEY_PROG3, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); /* fall through */ case DTK: for (i = 0; i < 6; i++) __set_bit(BTN_0 + i, input_dev->keybit); - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); - input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 
wacom_setup_cintiq(wacom_wac); @@ -1579,6 +1599,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, wacom_setup_cintiq(wacom_wac); break; + case WACOM_13HD: + for (i = 0; i < 9; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + wacom_setup_cintiq(wacom_wac); + break; + case INTUOS3: case INTUOS3L: __set_bit(BTN_4, input_dev->keybit); @@ -1937,7 +1966,8 @@ static const struct wacom_features wacom_features_0xF4 = 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xF8 = { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */ - 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; + 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; static const struct wacom_features wacom_features_0xF6 = { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 }; @@ -1950,6 +1980,9 @@ static const struct wacom_features wacom_features_0xC5 = static const struct wacom_features wacom_features_0xC6 = { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0x304 = + { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59552, 33848, 1023, + 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC7 = { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -1959,6 +1992,9 @@ static const struct wacom_features wacom_features_0xCE = static const struct wacom_features wacom_features_0xF0 = { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x57 = + { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, + 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES}; static const struct wacom_features wacom_features_0x59 = /* Pen */ { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, @@ -1972,6 +2008,13 @@ static const struct wacom_features wacom_features_0xCC = static const struct wacom_features wacom_features_0xFA = { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0x5B = + { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, + 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e }; +static const struct wacom_features wacom_features_0x5E = + { "Wacom Cintiq 22HDT", .type = WACOM_24HDT, + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 }; static const struct wacom_features wacom_features_0x90 = { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2001,7 +2044,7 @@ static const struct wacom_features wacom_features_0xE5 = static const struct wacom_features wacom_features_0xE6 = { "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + .touch_max = 2 }; static const struct wacom_features wacom_features_0xEC = { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255, 0, 
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2143,8 +2186,11 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x43) }, { USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, + { USB_DEVICE_WACOM(0x57) }, { USB_DEVICE_WACOM(0x59) }, { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_WACOM(0x5B) }, + { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, @@ -2205,6 +2251,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x100) }, { USB_DEVICE_WACOM(0x101) }, { USB_DEVICE_WACOM(0x10D) }, + { USB_DEVICE_WACOM(0x304) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h index 5f9a7721e16c..dfc9e08e7f70 100644 --- a/drivers/input/tablet/wacom_wac.h +++ b/drivers/input/tablet/wacom_wac.h @@ -82,6 +82,7 @@ enum { WACOM_24HD, CINTIQ, WACOM_BEE, + WACOM_13HD, WACOM_MO, WIRELESS, BAMBOO_PT, diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c index 17c9097f3b5d..39f3df8670c3 100644 --- a/drivers/input/touchscreen/egalax_ts.c +++ b/drivers/input/touchscreen/egalax_ts.c @@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client, input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0); input_set_abs_params(input_dev, - ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0); + ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0); input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0); input_set_drvdata(input_dev, ts); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 29889bbdcc6d..63b3d4eb0ef7 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -76,16 +76,10 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) { u32 irqnr; - do { - irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); - if (irqnr != 0x7f) { - __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); - irqnr = irq_find_mapping(icoll_domain, irqnr); - handle_IRQ(irqnr, regs); - continue; - } - break; - } while (1); + irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); + __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); + irqnr = irq_find_mapping(icoll_domain, irqnr); + handle_IRQ(irqnr, regs); } static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 065b7a31a478..47a52ab580d8 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -119,7 +119,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, /* Skip invalid IRQs, only register handlers for the real ones */ if (!(f->valid & BIT(hwirq))) - return -ENOTSUPP; + return -EPERM; irq_set_chip_data(irq, f); irq_set_chip_and_handler(irq, &f->chip, handle_level_irq); diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 884d11c7355f..2bbb00404cf5 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -197,7 +197,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq, /* Skip invalid IRQs, only register handlers for the real ones */ if (!(v->valid_sources & (1 << hwirq))) - return -ENOTSUPP; + return -EPERM; irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); irq_set_chip_data(irq, v->base); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); diff --git a/drivers/isdn/capi/kcapi.c 
b/drivers/isdn/capi/kcapi.c index 9b1b274c7d25..c123709acf82 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr) static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr) { - if (contr - 1 >= CAPI_MAXCONTR) + if (contr < 1 || contr - 1 >= CAPI_MAXCONTR) return NULL; return capi_controller[contr - 1]; @@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid) { lockdep_assert_held(&capi_controller_lock); - if (applid - 1 >= CAPI_MAXAPPL) + if (applid < 1 || applid - 1 >= CAPI_MAXAPPL) return NULL; return capi_applications[applid - 1]; @@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid) static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid) { - if (applid - 1 >= CAPI_MAXAPPL) + if (applid < 1 || applid - 1 >= CAPI_MAXAPPL) return NULL; return rcu_dereference(capi_applications[applid - 1]); diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index a0d931bcb37c..b02b679abf31 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -107,6 +107,10 @@ static int create_gpio_led(const struct gpio_led *template, return 0; } + ret = devm_gpio_request(parent, template->gpio, template->name); + if (ret < 0) + return ret; + led_dat->cdev.name = template->name; led_dat->cdev.default_trigger = template->default_trigger; led_dat->gpio = template->gpio; @@ -126,10 +130,7 @@ static int create_gpio_led(const struct gpio_led *template, if (!template->retain_state_suspended) led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; - ret = devm_gpio_request_one(parent, template->gpio, - (led_dat->active_low ^ state) ? - GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, - template->name); + ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); if (ret < 0) return ret; diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c index ee14662ed5ce..98cae529373f 100644 --- a/drivers/leds/leds-ot200.c +++ b/drivers/leds/leds-ot200.c @@ -47,37 +47,37 @@ static struct ot200_led leds[] = { { .name = "led_1", .port = 0x49, - .mask = BIT(7), + .mask = BIT(6), }, { .name = "led_2", .port = 0x49, - .mask = BIT(6), + .mask = BIT(5), }, { .name = "led_3", .port = 0x49, - .mask = BIT(5), + .mask = BIT(4), }, { .name = "led_4", .port = 0x49, - .mask = BIT(4), + .mask = BIT(3), }, { .name = "led_5", .port = 0x49, - .mask = BIT(3), + .mask = BIT(2), }, { .name = "led_6", .port = 0x49, - .mask = BIT(2), + .mask = BIT(1), }, { .name = "led_7", .port = 0x49, - .mask = BIT(1), + .mask = BIT(0), } }; diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 699187ab3800..5b9ac32801c7 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) kill_guest(&lg->cpus[0], "Cannot populate switcher mapping"); } + lg->pgdirs[pgdir].last_host_cpu = -1; } } diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index 05c220d05e23..f950c9d29f3e 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -1,7 +1,6 @@ config BCACHE tristate "Block device as cache" - select CLOSURES ---help--- Allows a block device to be used as cache for other devices; uses a btree for indexing and the layout is optimized for SSDs. 
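The kcapi bounds-check change above deserves a brief illustration: contr and applid are u16, so an expression such as applid - 1 is evaluated after the usual promotion to int. For applid == 0 that gives -1 rather than a large unsigned value, so the old test applid - 1 >= CAPI_MAXAPPL does not trip and the code goes on to access capi_applications[-1]. The following stand-alone sketch shows the old and new checks side by side; MAXAPPL is a stand-in value chosen for the example, not the kernel constant itself.

#include <stdio.h>
#include <stdint.h>

#define MAXAPPL 240	/* stand-in for CAPI_MAXAPPL; the exact value is not the point */

/* old check: u16 promotes to int, so applid == 0 yields -1 and is not caught */
static int old_rejects(uint16_t applid)
{
	return applid - 1 >= MAXAPPL;
}

/* new check: explicitly reject applid == 0 before using applid - 1 as an index */
static int new_rejects(uint16_t applid)
{
	return applid < 1 || applid - 1 >= MAXAPPL;
}

int main(void)
{
	/* old_rejects(0) prints 0: the bogus id slips through to index -1 */
	printf("old_rejects(0) = %d, new_rejects(0) = %d\n",
	       old_rejects(0), new_rejects(0));
	return 0;
}

The same promotion applies to contr in get_capi_ctr_by_nr(), which is why all three lookup helpers in the hunk gain the explicit < 1 test.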
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 340146d7c17f..d3e15b42a4ab 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *); struct cache_set *bch_cache_set_alloc(struct cache_sb *); void bch_btree_cache_free(struct cache_set *); int bch_btree_cache_alloc(struct cache_set *); -void bch_writeback_init_cached_dev(struct cached_dev *); +void bch_cached_dev_writeback_init(struct cached_dev *); void bch_moving_init_cache_set(struct cache_set *); void bch_cache_allocator_exit(struct cache *ca); diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index 64e679449c2a..b8730e714d69 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = { }; static KTYPE(bch_stats); -static void scale_accounting(unsigned long data); - -void bch_cache_accounting_init(struct cache_accounting *acc, - struct closure *parent) -{ - kobject_init(&acc->total.kobj, &bch_stats_ktype); - kobject_init(&acc->five_minute.kobj, &bch_stats_ktype); - kobject_init(&acc->hour.kobj, &bch_stats_ktype); - kobject_init(&acc->day.kobj, &bch_stats_ktype); - - closure_init(&acc->cl, parent); - init_timer(&acc->timer); - acc->timer.expires = jiffies + accounting_delay; - acc->timer.data = (unsigned long) acc; - acc->timer.function = scale_accounting; - add_timer(&acc->timer); -} - int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, struct kobject *parent) { @@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors) atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); } + +void bch_cache_accounting_init(struct cache_accounting *acc, + struct closure *parent) +{ + kobject_init(&acc->total.kobj, &bch_stats_ktype); + kobject_init(&acc->five_minute.kobj, &bch_stats_ktype); + kobject_init(&acc->hour.kobj, &bch_stats_ktype); + kobject_init(&acc->day.kobj, &bch_stats_ktype); + + closure_init(&acc->cl, parent); + init_timer(&acc->timer); + acc->timer.expires = jiffies + accounting_delay; + acc->timer.data = (unsigned long) acc; + acc->timer.function = scale_accounting; + add_timer(&acc->timer); +} diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index c8046bc4aa57..f88e2b653a3f 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode) return 0; } -static int release_dev(struct gendisk *b, fmode_t mode) +static void release_dev(struct gendisk *b, fmode_t mode) { struct bcache_device *d = b->private_data; closure_put(&d->cl); - return 0; } static int ioctl_dev(struct block_device *b, fmode_t mode, @@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d) if (d->c) bcache_device_detach(d); - - if (d->disk) + if (d->disk && d->disk->flags & GENHD_FL_UP) del_gendisk(d->disk); if (d->disk && d->disk->queue) blk_cleanup_queue(d->disk->queue); @@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size) if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, sizeof(struct bio_vec) * BIO_MAX_PAGES)) || - bio_split_pool_init(&d->bio_split_hook)) - - return -ENOMEM; - - d->disk = alloc_disk(1); - if (!d->disk) + bio_split_pool_init(&d->bio_split_hook) || + !(d->disk = alloc_disk(1)) || + !(q = 
blk_alloc_queue(GFP_KERNEL))) return -ENOMEM; snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); @@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size) d->disk->fops = &bcache_ops; d->disk->private_data = d; - q = blk_alloc_queue(GFP_KERNEL); - if (!q) - return -ENOMEM; - blk_queue_make_request(q, NULL); d->disk->queue = q; q->queuedata = d; @@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl) mutex_lock(&bch_register_lock); - bd_unlink_disk_holder(dc->bdev, dc->disk.disk); + if (atomic_read(&dc->running)) + bd_unlink_disk_holder(dc->bdev, dc->disk.disk); bcache_device_free(&dc->disk); list_del(&dc->list); mutex_unlock(&bch_register_lock); if (!IS_ERR_OR_NULL(dc->bdev)) { - blk_sync_queue(bdev_get_queue(dc->bdev)); + if (dc->bdev->bd_disk) + blk_sync_queue(bdev_get_queue(dc->bdev)); + blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } @@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl) static int cached_dev_init(struct cached_dev *dc, unsigned block_size) { - int err; + int ret; struct io *io; - - closure_init(&dc->disk.cl, NULL); - set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); + struct request_queue *q = bdev_get_queue(dc->bdev); __module_get(THIS_MODULE); INIT_LIST_HEAD(&dc->list); + closure_init(&dc->disk.cl, NULL); + set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); - - bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); - - err = bcache_device_init(&dc->disk, block_size); - if (err) - goto err; - - spin_lock_init(&dc->io_lock); - closure_init_unlocked(&dc->sb_write); INIT_WORK(&dc->detach, cached_dev_detach_finish); + closure_init_unlocked(&dc->sb_write); + INIT_LIST_HEAD(&dc->io_lru); + spin_lock_init(&dc->io_lock); + bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); dc->sequential_merge = true; dc->sequential_cutoff = 4 << 20; - INIT_LIST_HEAD(&dc->io_lru); - dc->sb_bio.bi_max_vecs = 1; - dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; - for (io = dc->io; io < dc->io + RECENT_IO; io++) { list_add(&io->lru, &dc->io_lru); hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); } - bch_writeback_init_cached_dev(dc); + ret = bcache_device_init(&dc->disk, block_size); + if (ret) + return ret; + + set_capacity(dc->disk.disk, + dc->bdev->bd_part->nr_sects - dc->sb.data_offset); + + dc->disk.disk->queue->backing_dev_info.ra_pages = + max(dc->disk.disk->queue->backing_dev_info.ra_pages, + q->backing_dev_info.ra_pages); + + bch_cached_dev_request_init(dc); + bch_cached_dev_writeback_init(dc); return 0; -err: - bcache_device_stop(&dc->disk); - return err; } /* Cached device - bcache superblock */ -static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, +static void register_bdev(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cached_dev *dc) { char name[BDEVNAME_SIZE]; const char *err = "cannot allocate memory"; - struct gendisk *g; struct cache_set *c; - if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0) - return err; - memcpy(&dc->sb, sb, sizeof(struct cache_sb)); - dc->sb_bio.bi_io_vec[0].bv_page = sb_page; dc->bdev = bdev; dc->bdev->bd_holder = dc; - g = dc->disk.disk; - - set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); - - g->queue->backing_dev_info.ra_pages = - max(g->queue->backing_dev_info.ra_pages, - bdev->bd_queue->backing_dev_info.ra_pages); + bio_init(&dc->sb_bio); + dc->sb_bio.bi_max_vecs = 1; + dc->sb_bio.bi_io_vec 
= dc->sb_bio.bi_inline_vecs; + dc->sb_bio.bi_io_vec[0].bv_page = sb_page; + get_page(sb_page); - bch_cached_dev_request_init(dc); + if (cached_dev_init(dc, sb->block_size << 9)) + goto err; err = "error creating kobject"; if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, @@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) goto err; + pr_info("registered backing device %s", bdevname(bdev, name)); + list_add(&dc->list, &uncached_devices); list_for_each_entry(c, &bch_cache_sets, list) bch_cached_dev_attach(dc, c); @@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) bch_cached_dev_run(dc); - return NULL; + return; err: - kobject_put(&dc->disk.kobj); pr_notice("error opening %s: %s", bdevname(bdev, name), err); - /* - * Return NULL instead of an error because kobject_put() cleans - * everything up - */ - return NULL; + bcache_device_stop(&dc->disk); } /* Flash only volumes */ @@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) size_t free; struct bucket *b; - if (!ca) - return -ENOMEM; - __module_get(THIS_MODULE); kobject_init(&ca->kobj, &bch_cache_ktype); - memcpy(&ca->sb, sb, sizeof(struct cache_sb)); - INIT_LIST_HEAD(&ca->discards); - bio_init(&ca->sb_bio); - ca->sb_bio.bi_max_vecs = 1; - ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; - bio_init(&ca->journal.bio); ca->journal.bio.bi_max_vecs = 8; ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; @@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || !init_heap(&ca->heap, free << 3, GFP_KERNEL) || - !(ca->buckets = vmalloc(sizeof(struct bucket) * + !(ca->buckets = vzalloc(sizeof(struct bucket) * ca->sb.nbuckets)) || !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 2, GFP_KERNEL)) || !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || bio_split_pool_init(&ca->bio_split_hook)) - goto err; + return -ENOMEM; ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); - memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket)); for_each_bucket(b, ca) atomic_set(&b->pin, 0); @@ -1766,22 +1741,28 @@ err: return -ENOMEM; } -static const char *register_cache(struct cache_sb *sb, struct page *sb_page, +static void register_cache(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cache *ca) { char name[BDEVNAME_SIZE]; const char *err = "cannot allocate memory"; - if (cache_alloc(sb, ca) != 0) - return err; - - ca->sb_bio.bi_io_vec[0].bv_page = sb_page; + memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; + bio_init(&ca->sb_bio); + ca->sb_bio.bi_max_vecs = 1; + ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; + ca->sb_bio.bi_io_vec[0].bv_page = sb_page; + get_page(sb_page); + if (blk_queue_discard(bdev_get_queue(ca->bdev))) ca->discard = CACHE_DISCARD(&ca->sb); + if (cache_alloc(sb, ca) != 0) + goto err; + err = "error creating kobject"; if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) goto err; @@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page, goto err; pr_info("registered cache device %s", bdevname(bdev, name)); - - return 
NULL; + return; err: + pr_notice("error opening %s: %s", bdevname(bdev, name), err); kobject_put(&ca->kobj); - pr_info("error opening %s: %s", bdevname(bdev, name), err); - /* Return NULL instead of an error because kobject_put() cleans - * everything up - */ - return NULL; } /* Global interfaces/init */ @@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, bdev = blkdev_get_by_path(strim(path), FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); - if (bdev == ERR_PTR(-EBUSY)) - err = "device busy"; - - if (IS_ERR(bdev) || - set_blocksize(bdev, 4096)) + if (IS_ERR(bdev)) { + if (bdev == ERR_PTR(-EBUSY)) + err = "device busy"; goto err; + } + + err = "failed to set blocksize"; + if (set_blocksize(bdev, 4096)) + goto err_close; err = read_super(sb, bdev, &sb_page); if (err) @@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); + if (!dc) + goto err_close; - err = register_bdev(sb, sb_page, bdev, dc); + register_bdev(sb, sb_page, bdev, dc); } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + if (!ca) + goto err_close; - err = register_cache(sb, sb_page, bdev, ca); + register_cache(sb, sb_page, bdev, ca); } - - if (err) { - /* register_(bdev|cache) will only return an error if they - * didn't get far enough to create the kobject - if they did, - * the kobject destructor will do this cleanup. - */ +out: + if (sb_page) put_page(sb_page); -err_close: - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -err: - if (attr != &ksysfs_register_quiet) - pr_info("error opening %s: %s", path, err); - ret = -EINVAL; - } - kfree(sb); kfree(path); mutex_unlock(&bch_register_lock); module_put(THIS_MODULE); return ret; + +err_close: + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); +err: + if (attr != &ksysfs_register_quiet) + pr_info("error opening %s: %s", path, err); + ret = -EINVAL; + goto out; } static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 93e7e31a4bd3..2714ed3991d1 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -375,7 +375,7 @@ err: refill_dirty(cl); } -void bch_writeback_init_cached_dev(struct cached_dev *dc) +void bch_cached_dev_writeback_init(struct cached_dev *dc) { closure_init_unlocked(&dc->writeback); init_rwsem(&dc->writeback_lock); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index c6083132c4b8..0387e05cdb98 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -319,6 +319,9 @@ static void __cache_size_refresh(void) static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, enum data_mode *data_mode) { + unsigned noio_flag; + void *ptr; + if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { *data_mode = DATA_MODE_SLAB; return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, } *data_mode = DATA_MODE_VMALLOC; - return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + /* + * __vmalloc allocates the data pages and auxiliary structures with + * gfp_flags that were specified, but pagetables are always allocated + * with GFP_KERNEL, no matter what was specified as gfp_mask. 
+ * + * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that + * all allocations done by this process (including pagetables) are done + * as if GFP_NOIO was specified. + */ + + if (gfp_mask & __GFP_NORETRY) + noio_flag = memalloc_noio_save(); + + ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + if (gfp_mask & __GFP_NORETRY) + memalloc_noio_restore(noio_flag); + + return ptr; } /* diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 83e995fece88..1af7255bbffb 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_read(&cmd->root_lock); - memcpy(stats, &cmd->stats, sizeof(*stats)); + *stats = cmd->stats; up_read(&cmd->root_lock); } @@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_write(&cmd->root_lock); - memcpy(&cmd->stats, stats, sizeof(*stats)); + cmd->stats = *stats; up_write(&cmd->root_lock); } diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index 558bdfdabf5f..33369ca9614f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -130,8 +130,8 @@ struct dm_cache_policy { * * Must not block. * - * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK - * would be typical). + * Returns 0 if in cache, -ENOENT if not, < 0 for other errors + * (-EWOULDBLOCK would be typical). */ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 10744091e6ca..df44b60e66f2 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -205,7 +205,7 @@ struct per_bio_data { /* * writethrough fields. These MUST remain at the end of this * structure and the 'cache' member must be the first as it - * is used to determine the offsetof the writethrough fields. + * is used to determine the offset of the writethrough fields. */ struct cache *cache; dm_cblock_t cblock; @@ -393,7 +393,7 @@ static int get_cell(struct cache *cache, return r; } - /*----------------------------------------------------------------*/ +/*----------------------------------------------------------------*/ static bool is_dirty(struct cache *cache, dm_cblock_t b) { @@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl } /*----------------------------------------------------------------*/ + static bool block_size_is_power_of_two(struct cache *cache) { return cache->sectors_per_block_shift >= 0; @@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err) /* * We can't issue this bio directly, since we're in interrupt - * context. So it get's put on a bio list for processing by the + * context. So it gets put on a bio list for processing by the * worker thread. 
*/ defer_writethrough_bio(pb->cache, bio); @@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws) static void do_waker(struct work_struct *ws) { struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); + policy_tick(cache->policy); wake_worker(cache); queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); } @@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv, static struct kmem_cache *migration_cache; -static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv) +#define NOT_CORE_OPTION 1 + +static int process_config_option(struct cache *cache, const char *key, const char *value) +{ + unsigned long tmp; + + if (!strcasecmp(key, "migration_threshold")) { + if (kstrtoul(value, 10, &tmp)) + return -EINVAL; + + cache->migration_threshold = tmp; + return 0; + } + + return NOT_CORE_OPTION; +} + +static int set_config_value(struct cache *cache, const char *key, const char *value) +{ + int r = process_config_option(cache, key, value); + + if (r == NOT_CORE_OPTION) + r = policy_set_config_value(cache->policy, key, value); + + if (r) + DMWARN("bad config value for %s: %s", key, value); + + return r; +} + +static int set_config_values(struct cache *cache, int argc, const char **argv) { int r = 0; @@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a } while (argc) { - r = policy_set_config_value(p, argv[0], argv[1]); - if (r) { - DMWARN("policy_set_config_value failed: key = '%s', value = '%s'", - argv[0], argv[1]); - return r; - } + r = set_config_value(cache, argv[0], argv[1]); + if (r) + break; argc -= 2; argv += 2; @@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a static int create_cache_policy(struct cache *cache, struct cache_args *ca, char **error) { - int r; - cache->policy = dm_cache_policy_create(ca->policy_name, cache->cache_size, cache->origin_sectors, @@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, return -ENOMEM; } - r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); - if (r) { - *error = "Error setting cache policy's config values"; - dm_cache_policy_destroy(cache->policy); - cache->policy = NULL; - } - - return r; + return 0; } /* @@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, return discard_block_size; } -#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) +#define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) { @@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; - memcpy(&cache->features, &ca->features, sizeof(cache->features)); + cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); cache->callbacks.congested_fn = cache_is_congested; @@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result) r = create_cache_policy(cache, ca, error); if (r) goto bad; + cache->policy_nr_args = ca->policy_argc; + cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; + + r = set_config_values(cache, ca->policy_argc, ca->policy_argv); + if (r) { + *error = "Error setting cache policy's config values"; + goto bad; + } cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, ca->block_size, may_format, @@ -1967,10 +1995,10 @@ static int 
cache_create(struct cache_args *ca, struct cache **result) INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); - cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; atomic_set(&cache->nr_migrations, 0); init_waitqueue_head(&cache->migration_wait); + r = -ENOMEM; cache->nr_dirty = 0; cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); if (!cache->dirty_bitset) { @@ -2517,23 +2545,6 @@ err: DMEMIT("Error"); } -#define NOT_CORE_OPTION 1 - -static int process_config_option(struct cache *cache, char **argv) -{ - unsigned long tmp; - - if (!strcasecmp(argv[0], "migration_threshold")) { - if (kstrtoul(argv[1], 10, &tmp)) - return -EINVAL; - - cache->migration_threshold = tmp; - return 0; - } - - return NOT_CORE_OPTION; -} - /* * Supports <key> <value>. * @@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv) */ static int cache_message(struct dm_target *ti, unsigned argc, char **argv) { - int r; struct cache *cache = ti->private; if (argc != 2) return -EINVAL; - r = process_config_option(cache, argv); - if (r == NOT_CORE_OPTION) - return policy_set_config_value(cache->policy, argv[0], argv[1]); - - return r; + return set_config_value(cache, argv[0], argv[1]); } static int cache_iterate_devices(struct dm_target *ti, @@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 1, 0}, + .version = {1, 1, 1}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 51bb81676be3..bdf26f5bd326 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, ti->num_flush_bios = 1; ti->num_discard_bios = 1; + ti->num_write_same_bios = 1; return 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0e07026a8d1..c434e5aab2df 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); if (!s->pending_pool) { ti->error = "Could not allocate mempool for pending exceptions"; + r = -ENOMEM; goto bad_pending_pool; } diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index ea5e878a30b9..d907ca6227ce 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc, static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct stripe_c *sc; - sector_t width; + sector_t width, tmp_len; uint32_t stripes; uint32_t chunk_size; int r; @@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) } width = ti->len; - if (sector_div(width, chunk_size)) { + if (sector_div(width, stripes)) { ti->error = "Target length not divisible by " - "chunk size"; + "number of stripes"; return -EINVAL; } - if (sector_div(width, stripes)) { + tmp_len = width; + if (sector_div(tmp_len, chunk_size)) { ti->error = "Target length not divisible by " - "number of stripes"; + "chunk size"; return -EINVAL; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e50dad0c65f4..1ff252ab7d46 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1442,7 +1442,7 @@ static bool 
dm_table_supports_write_same(struct dm_table *t) return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) + ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) return false; } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 00cee02f8fc9..60bce435f4fa 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, return r; } -static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count) { int r; dm_block_t old_count; - r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count); + r = dm_sm_get_nr_blocks(sm, &old_count); if (r) return r; @@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) return 0; if (new_count < old_count) { - DMERR("cannot reduce size of data device"); + DMERR("cannot reduce size of space map"); return -EINVAL; } - return dm_sm_extend(pmd->data_sm, new_count - old_count); + return dm_sm_extend(sm, new_count - old_count); } int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) @@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) down_write(&pmd->root_lock); if (!pmd->fail_io) - r = __resize_data_dev(pmd, new_count); + r = __resize_space_map(pmd->data_sm, new_count); + up_write(&pmd->root_lock); + + return r; +} + +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +{ + int r = -EINVAL; + + down_write(&pmd->root_lock); + if (!pmd->fail_io) + r = __resize_space_map(pmd->metadata_sm, new_count); up_write(&pmd->root_lock); return r; @@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) dm_bm_set_read_only(pmd->bm); up_write(&pmd->root_lock); } + +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + int r; + + down_write(&pmd->root_lock); + r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); + up_write(&pmd->root_lock); + + return r; +} diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 0cecc3702885..845ebbe589a9 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -8,6 +8,7 @@ #define DM_THIN_METADATA_H #include "persistent-data/dm-block-manager.h" +#include "persistent-data/dm-space-map.h" #define THIN_METADATA_BLOCK_SIZE 4096 @@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); * blocks would be lost. 
*/ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); /* * Flicks the underlying block manager into read only mode, so you know @@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); */ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 004ad1652b73..88f2f802d528 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) return r; if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { - DMWARN("%s: reached low water mark, sending event.", + DMWARN("%s: reached low water mark for data device: sending event.", dm_device_name(pool->pool_md)); spin_lock_irqsave(&pool->lock, flags); pool->low_water_triggered = 1; @@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio) bio_io_error(bio); } +/* + * FIXME: should we also commit due to size of transaction, measured in + * metadata blocks? + */ static int need_commit_due_to_time(struct pool *pool) { return jiffies < pool->last_commit_jiffies || @@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, return r; } +static void metadata_low_callback(void *context) +{ + struct pool *pool = context; + + DMWARN("%s: reached low water mark for metadata device: sending event.", + dm_device_name(pool->pool_md)); + + dm_table_event(pool->ti->table); +} + +static sector_t get_metadata_dev_size(struct block_device *bdev) +{ + sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; + char buffer[BDEVNAME_SIZE]; + + if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) { + DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", + bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); + metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING; + } + + return metadata_dev_size; +} + +static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev) +{ + sector_t metadata_dev_size = get_metadata_dev_size(bdev); + + sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); + + return metadata_dev_size; +} + +/* + * When a metadata threshold is crossed a dm event is triggered, and + * userland should respond by growing the metadata device. We could let + * userland set the threshold, like we do with the data threshold, but I'm + * not sure they know enough to do this well. + */ +static dm_block_t calc_metadata_threshold(struct pool_c *pt) +{ + /* + * 4M is ample for all ops with the possible exception of thin + * device deletion which is harmless if it fails (just retry the + * delete after you've grown the device). 
+ */ + dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; + return min((dm_block_t)1024ULL /* 4M */, quarter); +} + /* * thin-pool <metadata dev> <data dev> * <data block size (sectors)> @@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned long block_size; dm_block_t low_water_blocks; struct dm_dev *metadata_dev; - sector_t metadata_dev_size; - char b[BDEVNAME_SIZE]; + fmode_t metadata_mode; /* * FIXME Remove validation from scope of lock. @@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -EINVAL; goto out_unlock; } + as.argc = argc; as.argv = argv; - r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev); + /* + * Set default pool features. + */ + pool_features_init(&pf); + + dm_consume_args(&as, 4); + r = parse_pool_features(&as, &pf, ti); + if (r) + goto out_unlock; + + metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); + r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); if (r) { ti->error = "Error opening metadata block device"; goto out_unlock; } - metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT; - if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) - DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", - bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS); + /* + * Run for the side-effect of possibly issuing a warning if the + * device is too big. + */ + (void) get_metadata_dev_size(metadata_dev->bdev); r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); if (r) { @@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) goto out; } - /* - * Set default pool features. - */ - pool_features_init(&pf); - - dm_consume_args(&as, 4); - r = parse_pool_features(&as, &pf, ti); - if (r) - goto out; - pt = kzalloc(sizeof(*pt), GFP_KERNEL); if (!pt) { r = -ENOMEM; @@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) } ti->private = pt; + r = dm_pool_register_metadata_threshold(pt->pool->pmd, + calc_metadata_threshold(pt), + metadata_low_callback, + pool); + if (r) + goto out_free_pt; + pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); @@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio) return r; } -/* - * Retrieves the number of blocks of the data device from - * the superblock and compares it to the actual device size, - * thus resizing the data device in case it has grown. - * - * This both copes with opening preallocated data devices in the ctr - * being followed by a resume - * -and- - * calling the resume method individually after userspace has - * grown the data device in reaction to a table event. - */ -static int pool_preresume(struct dm_target *ti) +static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) { int r; struct pool_c *pt = ti->private; @@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti) sector_t data_size = ti->len; dm_block_t sb_data_size; - /* - * Take control of the pool object. 
- */ - r = bind_control_target(pool, ti); - if (r) - return r; + *need_commit = false; (void) sector_div(data_size, pool->sectors_per_block); @@ -2114,7 +2161,7 @@ } if (data_size < sb_data_size) { - DMERR("pool target too small, is %llu blocks (expected %llu)", + DMERR("pool target (%llu blocks) too small: expected %llu", (unsigned long long)data_size, sb_data_size); return -EINVAL; @@ -2122,17 +2169,90 @@ r = dm_pool_resize_data_dev(pool->pmd, data_size); if (r) { DMERR("failed to resize data device"); - /* FIXME Stricter than necessary: Rollback transaction instead here */ set_pool_mode(pool, PM_READ_ONLY); return r; } - (void) commit_or_fallback(pool); + *need_commit = true; } return 0; } +static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) +{ + int r; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + dm_block_t metadata_dev_size, sb_metadata_dev_size; + + *need_commit = false; + + metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); + + r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); + if (r) { + DMERR("failed to retrieve metadata device size"); + return r; + } + + if (metadata_dev_size < sb_metadata_dev_size) { + DMERR("metadata device (%llu blocks) too small: expected %llu", + metadata_dev_size, sb_metadata_dev_size); + return -EINVAL; + + } else if (metadata_dev_size > sb_metadata_dev_size) { + r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); + if (r) { + DMERR("failed to resize metadata device"); + return r; + } + + *need_commit = true; + } + + return 0; +} + +/* + * Retrieves the number of blocks of the data device from + * the superblock and compares it to the actual device size, + * thus resizing the data device in case it has grown. + * + * This both copes with opening preallocated data devices in the ctr + * being followed by a resume + * -and- + * calling the resume method individually after userspace has + * grown the data device in reaction to a table event. + */ +static int pool_preresume(struct dm_target *ti) +{ + int r; + bool need_commit1, need_commit2; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + + /* + * Take control of the pool object. 
+ */ + r = bind_control_target(pool, ti); + if (r) + return r; + + r = maybe_resize_data_dev(ti, &need_commit1); + if (r) + return r; + + r = maybe_resize_metadata_dev(ti, &need_commit2); + if (r) + return r; + + if (need_commit1 || need_commit2) + (void) commit_or_fallback(pool); + + return 0; +} + static void pool_resume(struct dm_target *ti) { struct pool_c *pt = ti->private; @@ -2549,7 +2669,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 7, 0}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index f6d29e614ab7..e735a6d5a793 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -248,7 +248,8 @@ static struct dm_space_map ops = { .new_block = sm_disk_new_block, .commit = sm_disk_commit, .root_size = sm_disk_root_size, - .copy_root = sm_disk_copy_root + .copy_root = sm_disk_copy_root, + .register_threshold_callback = NULL }; struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 906cf3df71af..1c959684caef 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -17,6 +17,55 @@ /*----------------------------------------------------------------*/ /* + * An edge triggered threshold. + */ +struct threshold { + bool threshold_set; + bool value_set; + dm_block_t threshold; + dm_block_t current_value; + dm_sm_threshold_fn fn; + void *context; +}; + +static void threshold_init(struct threshold *t) +{ + t->threshold_set = false; + t->value_set = false; +} + +static void set_threshold(struct threshold *t, dm_block_t value, + dm_sm_threshold_fn fn, void *context) +{ + t->threshold_set = true; + t->threshold = value; + t->fn = fn; + t->context = context; +} + +static bool below_threshold(struct threshold *t, dm_block_t value) +{ + return t->threshold_set && value <= t->threshold; +} + +static bool threshold_already_triggered(struct threshold *t) +{ + return t->value_set && below_threshold(t, t->current_value); +} + +static void check_threshold(struct threshold *t, dm_block_t value) +{ + if (below_threshold(t, value) && + !threshold_already_triggered(t)) + t->fn(t->context); + + t->value_set = true; + t->current_value = value; +} + +/*----------------------------------------------------------------*/ + +/* * Space map interface. 
* * The low level disk format is written using the standard btree and @@ -54,6 +103,8 @@ struct sm_metadata { unsigned allocated_this_transaction; unsigned nr_uncommitted; struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; + + struct threshold threshold; }; static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) @@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm) kfree(smm); } -static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) -{ - DMERR("doesn't support extend"); - return -EINVAL; -} - static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count) { struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); @@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) { + dm_block_t count; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + int r = sm_metadata_new_block_(sm, b); if (r) DMERR("unable to allocate new metadata block"); + + r = sm_metadata_get_nr_free(sm, &count); + if (r) + DMERR("couldn't get free block count"); + + check_threshold(&smm->threshold, count); + return r; } @@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm) return 0; } +static int sm_metadata_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + + set_threshold(&smm->threshold, threshold, fn, context); + + return 0; +} + static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result) { *result = sizeof(struct disk_sm_root); @@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t return 0; } +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks); + static struct dm_space_map ops = { .destroy = sm_metadata_destroy, .extend = sm_metadata_extend, @@ -395,7 +464,8 @@ static struct dm_space_map ops = { .new_block = sm_metadata_new_block, .commit = sm_metadata_commit, .root_size = sm_metadata_root_size, - .copy_root = sm_metadata_copy_root + .copy_root = sm_metadata_copy_root, + .register_threshold_callback = sm_metadata_register_threshold_callback }; /*----------------------------------------------------------------*/ @@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm) static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks) { - DMERR("boostrap doesn't support extend"); + DMERR("bootstrap doesn't support extend"); return -EINVAL; } @@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm, static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count) { - DMERR("boostrap doesn't support set_count"); + DMERR("bootstrap doesn't support set_count"); return -EINVAL; } @@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm) static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) { - DMERR("boostrap doesn't support root_size"); + DMERR("bootstrap doesn't support root_size"); return -EINVAL; } @@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where, size_t max) { - DMERR("boostrap doesn't support copy_root"); + DMERR("bootstrap doesn't support copy_root"); return -EINVAL; } @@ -517,11 +587,42 
@@ static struct dm_space_map bootstrap_ops = { .new_block = sm_bootstrap_new_block, .commit = sm_bootstrap_commit, .root_size = sm_bootstrap_root_size, - .copy_root = sm_bootstrap_copy_root + .copy_root = sm_bootstrap_copy_root, + .register_threshold_callback = NULL }; /*----------------------------------------------------------------*/ +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) +{ + int r, i; + enum allocation_event ev; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + dm_block_t old_len = smm->ll.nr_blocks; + + /* + * Flick into a mode where all blocks get allocated in the new area. + */ + smm->begin = old_len; + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); + + /* + * Extend. + */ + r = sm_ll_extend(&smm->ll, extra_blocks); + + /* + * Switch back to normal behaviour. + */ + memcpy(&smm->sm, &ops, sizeof(smm->sm)); + for (i = old_len; !r && i < smm->begin; i++) + r = sm_ll_inc(&smm->ll, i, &ev); + + return r; +} + +/*----------------------------------------------------------------*/ + struct dm_space_map *dm_sm_metadata_init(void) { struct sm_metadata *smm; @@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); return 0; diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h index 1cbfc6b1638a..3e6d1153b7c4 100644 --- a/drivers/md/persistent-data/dm-space-map.h +++ b/drivers/md/persistent-data/dm-space-map.h @@ -9,6 +9,8 @@ #include "dm-block-manager.h" +typedef void (*dm_sm_threshold_fn)(void *context); + /* * struct dm_space_map keeps a record of how many times each block in a device * is referenced. It needs to be fixed on disk as part of the transaction. @@ -59,6 +61,15 @@ struct dm_space_map { */ int (*root_size)(struct dm_space_map *sm, size_t *result); int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len); + + /* + * You can register one threshold callback which is edge-triggered + * when the free space in the space map drops below the threshold. 
+ */ + int (*register_threshold_callback)(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); }; /*----------------------------------------------------------------*/ @@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le return sm->copy_root(sm, copy_to_here_le, len); } +static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + if (sm->register_threshold_callback) + return sm->register_threshold_callback(sm, threshold, fn, context); + + return -EINVAL; +} + + #endif /* _LINUX_DM_SPACE_MAP_H */ diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9359828ffe26..753f318c8984 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) bi->bi_rw |= REQ_FLUSH; + bi->bi_vcnt = 1; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; bi->bi_io_vec[0].bv_offset = 0; bi->bi_size = STRIPE_SIZE; @@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) else rbi->bi_sector = (sh->sector + rrdev->data_offset); + rbi->bi_vcnt = 1; rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; rbi->bi_io_vec[0].bv_offset = 0; rbi->bi_size = STRIPE_SIZE; diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h index ca2754a3cd63..5e040085c2ff 100644 --- a/drivers/media/pci/zoran/zoran.h +++ b/drivers/media/pci/zoran/zoran.h @@ -176,7 +176,7 @@ struct zoran_fh; struct zoran_mapping { struct zoran_fh *fh; - int count; + atomic_t count; }; struct zoran_buffer { diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c index 1168a84a737d..d133c30c3fdc 100644 --- a/drivers/media/pci/zoran/zoran_driver.c +++ b/drivers/media/pci/zoran/zoran_driver.c @@ -2803,8 +2803,7 @@ static void zoran_vm_open (struct vm_area_struct *vma) { struct zoran_mapping *map = vma->vm_private_data; - - map->count++; + atomic_inc(&map->count); } static void @@ -2815,7 +2814,7 @@ zoran_vm_close (struct vm_area_struct *vma) struct zoran *zr = fh->zr; int i; - if (--map->count > 0) + if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock)) return; dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr), @@ -2828,14 +2827,16 @@ zoran_vm_close (struct vm_area_struct *vma) kfree(map); /* Any buffers still mapped? 
*/ - for (i = 0; i < fh->buffers.num_buffers; i++) - if (fh->buffers.buffer[i].map) + for (i = 0; i < fh->buffers.num_buffers; i++) { + if (fh->buffers.buffer[i].map) { + mutex_unlock(&zr->resource_lock); return; + } + } dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode)); - mutex_lock(&zr->resource_lock); if (fh->map_mode == ZORAN_MAP_MODE_RAW) { if (fh->buffers.active != ZORAN_FREE) { @@ -2939,7 +2940,7 @@ zoran_mmap (struct file *file, goto mmap_unlock_and_return; } map->fh = fh; - map->count = 1; + atomic_set(&map->count, 1); vma->vm_ops = &zoran_vm_ops; vma->vm_flags |= VM_DONTEXPAND; diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index 477268a2415f..d338b19da544 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -2150,6 +2150,9 @@ static int __init omap_vout_probe(struct platform_device *pdev) struct omap_dss_device *def_display; struct omap2video_device *vid_dev = NULL; + if (omapdss_is_initialized() == false) + return -EPROBE_DEFER; + ret = omapdss_compat_init(); if (ret) { dev_err(&pdev->dev, "failed to init dss\n"); diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index cadf1cc19aaf..04644e7b42b1 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -1560,12 +1560,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev) platform_set_drvdata(pdev, emif); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(emif->dev, "%s: error getting memory resource\n", - __func__); - goto error; - } - emif->base = devm_ioremap_resource(emif->dev, res); if (IS_ERR(emif->base)) goto error; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index d9aed1593e5d..d54e985748b7 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -579,7 +579,7 @@ config AB8500_CORE config AB8500_DEBUG bool "Enable debug info via debugfs" - depends on AB8500_CORE && DEBUG_FS + depends on AB8500_GPADC && DEBUG_FS default y if DEBUG_FS help Select this option if you want debug information using the debug @@ -818,6 +818,7 @@ config MFD_TPS65910 config MFD_TPS65912 bool "TI TPS65912 Power Management chip" depends on GPIOLIB + select MFD_CORE help If you say yes here you get support for the TPS65912 series of PM chips. diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 8e8a016effe9..258b367e3989 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -868,6 +868,15 @@ static struct resource ab8500_chargalg_resources[] = {}; #ifdef CONFIG_DEBUG_FS static struct resource ab8500_debug_resources[] = { { + .name = "IRQ_AB8500", + /* + * Number will be filled in. NOTE: this is deliberately + * not flagged as an IRQ in order to avoid remapping using + * the irqdomain in the MFD core, so that this IRQ passes + * unremapped to the debug code. 
+ */ + }, + { .name = "IRQ_FIRST", .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, .end = AB8500_INT_MAIN_EXT_CH_NOT_OK, @@ -1051,6 +1060,7 @@ static struct mfd_cell ab8500_devs[] = { }, { .name = "ab8500-gpadc", + .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), .resources = ab8500_gpadc_resources, }, @@ -1097,7 +1107,7 @@ static struct mfd_cell ab8500_devs[] = { .of_compatible = "stericsson,ab8500-denc", }, { - .name = "ab8500-gpio", + .name = "pinctrl-ab8500", .of_compatible = "stericsson,ab8500-gpio", }, { @@ -1208,6 +1218,7 @@ static struct mfd_cell ab8505_devs[] = { }, { .name = "ab8500-gpadc", + .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), .resources = ab8505_gpadc_resources, }, @@ -1234,7 +1245,7 @@ static struct mfd_cell ab8505_devs[] = { .name = "ab8500-leds", }, { - .name = "ab8500-gpio", + .name = "pinctrl-ab8505", }, { .name = "ab8500-usb", @@ -1271,6 +1282,7 @@ static struct mfd_cell ab8540_devs[] = { }, { .name = "ab8500-gpadc", + .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), .resources = ab8505_gpadc_resources, }, @@ -1302,7 +1314,7 @@ static struct mfd_cell ab8540_devs[] = { .resources = ab8500_temp_resources, }, { - .name = "ab8500-gpio", + .name = "pinctrl-ab8540", }, { .name = "ab8540-usb", @@ -1712,6 +1724,12 @@ static int ab8500_probe(struct platform_device *pdev) if (ret) return ret; +#if CONFIG_DEBUG_FS + /* Pass to debugfs */ + ab8500_debug_resources[0].start = ab8500->irq; + ab8500_debug_resources[0].end = ab8500->irq; +#endif + if (is_ab9540(ab8500)) ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs, ARRAY_SIZE(ab9540_devs), NULL, diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index b88bbbc15f1e..37b7ce4c7c3b 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -91,12 +91,10 @@ #include <linux/ctype.h> #endif -/* TODO: this file should not reference IRQ_DB8500_AB8500! 
*/ -#include <mach/irqs.h> - static u32 debug_bank; static u32 debug_address; +static int irq_ab8500; static int irq_first; static int irq_last; static u32 *irq_count; @@ -1589,7 +1587,7 @@ void ab8500_debug_register_interrupt(int line) { if (line < num_interrupt_lines) { num_interrupts[line]++; - if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500)) + if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500)) num_wake_interrupts[line]++; } } @@ -2941,6 +2939,7 @@ static int ab8500_debug_probe(struct platform_device *plf) struct dentry *file; int ret = -ENOMEM; struct ab8500 *ab8500; + struct resource *res; debug_bank = AB8500_MISC; debug_address = AB8500_REV_REG & 0x00FF; @@ -2959,6 +2958,15 @@ static int ab8500_debug_probe(struct platform_device *plf) if (!event_name) goto out_freedev_attr; + res = platform_get_resource_byname(plf, 0, "IRQ_AB8500"); + if (!res) { + dev_err(&plf->dev, "AB8500 irq not found, err %d\n", + irq_first); + ret = -ENXIO; + goto out_freeevent_name; + } + irq_ab8500 = res->start; + irq_first = platform_get_irq_byname(plf, "IRQ_FIRST"); if (irq_first < 0) { dev_err(&plf->dev, "First irq not found, err %d\n", diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c index 5e65b28a5d09..13f7866de46e 100644 --- a/drivers/mfd/ab8500-gpadc.c +++ b/drivers/mfd/ab8500-gpadc.c @@ -907,14 +907,17 @@ static int ab8500_gpadc_suspend(struct device *dev) static int ab8500_gpadc_resume(struct device *dev) { struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); + int ret; - regulator_enable(gpadc->regu); + ret = regulator_enable(gpadc->regu); + if (ret) + dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret); pm_runtime_mark_last_busy(gpadc->dev); pm_runtime_put_autosuspend(gpadc->dev); mutex_unlock(&gpadc->ab8500_gpadc_lock); - return 0; + return ret; } static int ab8500_gpadc_probe(struct platform_device *pdev) diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c index fbca1ced49fa..8e0dae59844d 100644 --- a/drivers/mfd/ab8500-sysctrl.c +++ b/drivers/mfd/ab8500-sysctrl.c @@ -23,7 +23,7 @@ static struct device *sysctrl_dev; -void ab8500_power_off(void) +static void ab8500_power_off(void) { sigset_t old; sigset_t all; @@ -104,7 +104,7 @@ void ab8500_restart(char mode, const char *cmd) plat = dev_get_platdata(sysctrl_dev->parent); pdata = plat->sysctrl; - if (pdata->reboot_reason_code) + if (pdata && pdata->reboot_reason_code) reason = pdata->reboot_reason_code(cmd); else pr_warn("[%s] No reboot reason set. 
Default reason %d\n", @@ -188,14 +188,15 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev) plat = dev_get_platdata(pdev->dev.parent); - if (!(plat && plat->sysctrl)) + if (!plat) return -EINVAL; - if (plat->pm_power_off) + sysctrl_dev = &pdev->dev; + + if (!pm_power_off) pm_power_off = ab8500_power_off; pdata = plat->sysctrl; - if (pdata) { int last, ret, i, j; @@ -226,6 +227,10 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev) static int ab8500_sysctrl_remove(struct platform_device *pdev) { sysctrl_dev = NULL; + + if (pm_power_off == ab8500_power_off) + pm_power_off = NULL; + return 0; } diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c index 9818afba2515..3714acb61458 100644 --- a/drivers/mfd/abx500-core.c +++ b/drivers/mfd/abx500-core.c @@ -156,7 +156,7 @@ EXPORT_SYMBOL(abx500_startup_irq_enabled); void abx500_dump_all_banks(void) { struct abx500_ops *ops; - struct device dummy_child = {0}; + struct device dummy_child = {NULL}; struct abx500_device_entry *dev_entry; list_for_each_entry(dev_entry, &abx500_list, list) { diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c index 19193cf1e7a1..367ccb58ecb1 100644 --- a/drivers/mfd/cros_ec_spi.c +++ b/drivers/mfd/cros_ec_spi.c @@ -120,7 +120,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) { if (*ptr == EC_MSG_HEADER) { - dev_dbg(ec_dev->dev, "msg found at %ld\n", + dev_dbg(ec_dev->dev, "msg found at %zd\n", ptr - ec_dev->din); break; } @@ -154,7 +154,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, * maximum-supported transfer size. */ todo = min(need_len, 256); - dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n", + dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n", todo, need_len, ptr - ec_dev->din); memset(&trans, '\0', sizeof(trans)); @@ -178,7 +178,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, need_len -= todo; } - dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din); + dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din); return 0; } diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 319b8abe742b..66f80973596b 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -1613,6 +1613,8 @@ static unsigned long dsiclk_rate(u8 n) if (divsel == PRCM_DSI_PLLOUT_SEL_OFF) divsel = dsiclk[n].divsel; + else + dsiclk[n].divsel = divsel; switch (divsel) { case PRCM_DSI_PLLOUT_SEL_PHI_4: @@ -3095,6 +3097,7 @@ static struct mfd_cell db8500_prcmu_devs[] = { .num_resources = ARRAY_SIZE(db8500_thsens_resources), .resources = db8500_thsens_resources, .platform_data = &db8500_thsens_data, + .pdata_size = sizeof(db8500_thsens_data), }, }; diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c index 5be3b5e13855..d8d5137f9717 100644 --- a/drivers/mfd/intel_msic.c +++ b/drivers/mfd/intel_msic.c @@ -414,11 +414,6 @@ static int intel_msic_probe(struct platform_device *pdev) * the clients via intel_msic_irq_read(). 
*/ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "failed to get SRAM iomem resource\n"); - return -ENODEV; - } - msic->irq_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(msic->irq_base)) return PTR_ERR(msic->irq_base); diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c index de48b4e88450..6f1ef63086c9 100644 --- a/drivers/mfd/si476x-cmd.c +++ b/drivers/mfd/si476x-cmd.c @@ -29,6 +29,8 @@ #include <linux/mfd/si476x-core.h> +#include <asm/unaligned.h> + #define msb(x) ((u8)((u16) x >> 8)) #define lsb(x) ((u8)((u16) x & 0x00FF)) @@ -150,7 +152,7 @@ enum si476x_acf_status_report_bits { SI476X_ACF_SOFTMUTE_INT = (1 << 0), SI476X_ACF_SMUTE = (1 << 0), - SI476X_ACF_SMATTN = 0b11111, + SI476X_ACF_SMATTN = 0x1f, SI476X_ACF_PILOT = (1 << 7), SI476X_ACF_STBLEND = ~SI476X_ACF_PILOT, }; @@ -483,7 +485,7 @@ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property) if (err < 0) return err; else - return be16_to_cpup((__be16 *)(resp + 2)); + return get_unaligned_be16(resp + 2); } EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property); @@ -772,18 +774,18 @@ int si476x_core_cmd_am_rsq_status(struct si476x_core *core, if (!report) return err; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 & resp[2]; + report->bltf = 0x80 & resp[2]; + report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -931,26 +933,26 @@ int si476x_core_cmd_fm_rds_status(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->rdstpptyint = 0b00010000 & resp[1]; - report->rdspiint = 0b00001000 & resp[1]; - report->rdssyncint = 0b00000010 & resp[1]; - report->rdsfifoint = 0b00000001 & resp[1]; + report->rdstpptyint = 0x10 & resp[1]; + report->rdspiint = 0x08 & resp[1]; + report->rdssyncint = 0x02 & resp[1]; + report->rdsfifoint = 0x01 & resp[1]; - report->tpptyvalid = 0b00010000 & resp[2]; - report->pivalid = 0b00001000 & resp[2]; - report->rdssync = 0b00000010 & resp[2]; - report->rdsfifolost = 0b00000001 & resp[2]; + report->tpptyvalid = 0x10 & resp[2]; + report->pivalid = 0x08 & resp[2]; + report->rdssync = 0x02 & resp[2]; + report->rdsfifolost = 0x01 & resp[2]; - report->tp = 0b00100000 & resp[3]; - report->pty = 0b00011111 & resp[3]; + report->tp = 0x20 & resp[3]; + report->pty = 0x1f & resp[3]; - report->pi = be16_to_cpup((__be16 *)(resp + 4)); + report->pi = get_unaligned_be16(resp + 4); report->rdsfifoused = resp[6]; - report->ble[V4L2_RDS_BLOCK_A] = 0b11000000 & resp[7]; - report->ble[V4L2_RDS_BLOCK_B] = 0b00110000 & resp[7]; - report->ble[V4L2_RDS_BLOCK_C] = 0b00001100 & resp[7]; - report->ble[V4L2_RDS_BLOCK_D] = 0b00000011 & resp[7]; + report->ble[V4L2_RDS_BLOCK_A] = 0xc0 & resp[7]; + report->ble[V4L2_RDS_BLOCK_B] = 0x30 & resp[7]; + report->ble[V4L2_RDS_BLOCK_C] = 0x0c & resp[7]; + 
report->ble[V4L2_RDS_BLOCK_D] = 0x03 & resp[7]; report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A; report->rds[V4L2_RDS_BLOCK_A].msb = resp[8]; @@ -991,9 +993,9 @@ int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core, SI476X_DEFAULT_TIMEOUT); if (!err) { - report->expected = be16_to_cpup((__be16 *)(resp + 2)); - report->received = be16_to_cpup((__be16 *)(resp + 4)); - report->uncorrectable = be16_to_cpup((__be16 *)(resp + 6)); + report->expected = get_unaligned_be16(resp + 2); + report->received = get_unaligned_be16(resp + 4); + report->uncorrectable = get_unaligned_be16(resp + 6); } return err; @@ -1005,7 +1007,7 @@ int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core, { u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP]; const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = { - mode & 0b111, + mode & 0x07, }; return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY, @@ -1162,7 +1164,7 @@ static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core, const int am_freq = tuneargs->freq; u8 resp[CMD_AM_TUNE_FREQ_NRESP]; const u8 args[CMD_AM_TUNE_FREQ_NARGS] = { - (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11), + (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03), msb(am_freq), lsb(am_freq), }; @@ -1197,20 +1199,20 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0b10000000 & resp[1]; - report->multlint = 0b01000000 & resp[1]; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; + report->multhint = 0x80 & resp[1]; + report->multlint = 0x40 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 & resp[2]; + report->bltf = 0x80 & resp[2]; + report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1218,7 +1220,7 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); + report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; @@ -1251,20 +1253,20 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0b10000000 & resp[1]; - report->multlint = 0b01000000 & resp[1]; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; + report->multhint = 0x80 & resp[1]; + report->multlint = 0x40 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 
& resp[2]; + report->bltf = 0x80 & resp[2]; + report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1272,7 +1274,7 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); + report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; @@ -1306,21 +1308,21 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0b10000000 & resp[1]; - report->multlint = 0b01000000 & resp[1]; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; - - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->injside = 0b00000100 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 & resp[2]; - - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->multhint = 0x80 & resp[1]; + report->multlint = 0x40 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; + + report->bltf = 0x80 & resp[2]; + report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->injside = 0x04 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; + + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1329,7 +1331,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); + report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; @@ -1337,7 +1339,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, report->rdsdev = resp[18]; report->assidev = resp[19]; report->strongdev = resp[20]; - report->rdspi = be16_to_cpup((__be16 *)(resp + 21)); + report->rdspi = get_unaligned_be16(resp + 21); return err; } diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index c09c28f92055..1abd5ad59925 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -154,11 +154,6 @@ static int ssc_probe(struct platform_device *pdev) ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_dbg(&pdev->dev, "no mmio resource defined\n"); - return -ENXIO; - } - ssc->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(ssc->regs)) return PTR_ERR(ssc->regs); diff --git a/drivers/misc/dummy-irq.c b/drivers/misc/dummy-irq.c index 7014167e2c61..c37eeedfe215 100644 --- a/drivers/misc/dummy-irq.c +++ b/drivers/misc/dummy-irq.c @@ -19,7 +19,7 @@ #include <linux/irq.h> #include <linux/interrupt.h> -static int irq; +static int irq = -1; static irqreturn_t dummy_interrupt(int irq, void *dev_id) { @@ -36,6 +36,10 @@ static irqreturn_t dummy_interrupt(int irq, void *dev_id) 
static int __init dummy_irq_init(void) { + if (irq < 0) { + printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n"); + return -EIO; + } if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) { printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq); return -EIO; diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 1e935eacaa7f..9ecd49a7be1b 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -496,6 +496,8 @@ int mei_cl_disable_device(struct mei_cl_device *device) } } + device->event_cb = NULL; + mutex_unlock(&dev->device_lock); if (!device->ops || !device->ops->disable) diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 7c44c8dbae42..053139f61086 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -489,11 +489,16 @@ static int mei_ioctl_connect_client(struct file *file, /* find ME client we're trying to connect to */ i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); - if (i >= 0 && !dev->me_clients[i].props.fixed_address) { - cl->me_client_id = dev->me_clients[i].client_id; - cl->state = MEI_FILE_CONNECTING; + if (i < 0 || dev->me_clients[i].props.fixed_address) { + dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", + &data->in_client_uuid); + rets = -ENODEV; + goto end; } + cl->me_client_id = dev->me_clients[i].client_id; + cl->state = MEI_FILE_CONNECTING; + dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", cl->me_client_id); dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", @@ -527,11 +532,6 @@ static int mei_ioctl_connect_client(struct file *file, goto end; } - if (cl->state != MEI_FILE_CONNECTING) { - rets = -ENODEV; - goto end; - } - /* prepare the output buffer */ client = &data->out_client_properties; @@ -543,7 +543,6 @@ static int mei_ioctl_connect_client(struct file *file, rets = mei_cl_connect(cl, file); end: - dev_dbg(&dev->pdev->dev, "free connect cb memory."); return rets; } diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 44d273c5e19d..0535d1e0bc78 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg) nodesperblade = 2; else nodesperblade = 1; + memset(&info, 0, sizeof(info)); info.cpus = num_online_cpus(); info.nodes = num_online_nodes(); info.blades = info.nodes / nodesperblade; diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index ea98f7e9ccd1..39c2ecadb273 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig @@ -4,7 +4,7 @@ config VMWARE_VMCI tristate "VMware VMCI Driver" - depends on X86 && PCI && NET + depends on X86 && PCI help This is VMware's Virtual Machine Communication Interface. 
It enables high-speed communication between host and guest in a virtual diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index d94245dbd765..8ff2e5ee8fb8 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -23,7 +23,7 @@ #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/socket.h> +#include <linux/uio.h> #include <linux/wait.h> #include <linux/vmalloc.h> diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index e75774f72606..aca59d93d5a9 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2230,10 +2230,15 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, mmc_free_host(slot->mmc); } -static bool atmci_filter(struct dma_chan *chan, void *slave) +static bool atmci_filter(struct dma_chan *chan, void *pdata) { - struct mci_dma_data *sl = slave; + struct mci_platform_data *sl_pdata = pdata; + struct mci_dma_data *sl; + if (!sl_pdata) + return false; + + sl = sl_pdata->dma_slave; if (sl && find_slave_dev(sl) == chan->device->dev) { chan->private = slave_data_ptr(sl); return true; @@ -2245,24 +2250,18 @@ static bool atmci_filter(struct dma_chan *chan, void *slave) static bool atmci_configure_dma(struct atmel_mci *host) { struct mci_platform_data *pdata; + dma_cap_mask_t mask; if (host == NULL) return false; pdata = host->pdev->dev.platform_data; - if (!pdata) - return false; + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); - if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) { - dma_cap_mask_t mask; - - /* Try to grab a DMA channel */ - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - host->dma.chan = - dma_request_channel(mask, atmci_filter, pdata->dma_slave); - } + host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata, + &host->pdev->dev, "rxtx"); if (!host->dma.chan) { dev_warn(&host->pdev->dev, "no DMA channel available\n"); return false; diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 375c109607ff..f4f3038c1df0 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct variant_data *variant = host->variant; u32 pwr = 0; unsigned long flags; + int ret; pm_runtime_get_sync(mmc_dev(mmc)); @@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) break; case MMC_POWER_ON: if (!IS_ERR(mmc->supply.vqmmc) && - !regulator_is_enabled(mmc->supply.vqmmc)) - regulator_enable(mmc->supply.vqmmc); + !regulator_is_enabled(mmc->supply.vqmmc)) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret < 0) + dev_err(mmc_dev(mmc), + "failed to enable vqmmc regulator\n"); + } pwr |= MCI_PWR_ON; break; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 6e44025acf01..eccedc7d06a4 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -161,6 +161,7 @@ struct omap_hsmmc_host { */ struct regulator *vcc; struct regulator *vcc_aux; + int pbias_disable; void __iomem *base; resource_size_t mapbase; spinlock_t irq_lock; /* Prevent races with irq handler */ @@ -255,11 +256,11 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, if (!host->vcc) return 0; /* - * With DT, never turn OFF the regulator. This is because + * With DT, never turn OFF the regulator for MMC1. 
This is because * the pbias cell programming support is still missing when * booting with Device tree */ - if (dev->of_node && !vdd) + if (host->pbias_disable && !vdd) return 0; if (mmc_slot(host).before_set_reg) @@ -1520,10 +1521,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) (ios->vdd == DUAL_VOLT_OCR_BIT) && /* * With pbias cell programming missing, this - * can't be allowed when booting with device + * can't be allowed on MMC1 when booting with device * tree. */ - !host->dev->of_node) { + !host->pbias_disable) { /* * The mmc_select_voltage fn of the core does * not seem to set the power_mode to @@ -1871,6 +1872,10 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_context_save(host); + /* This can be removed once we support PBIAS with DT */ + if (host->dev->of_node && host->mapbase == 0x4809c000) + host->pbias_disable = 1; + host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); /* * MMC can still work without debounce clock. @@ -1906,33 +1911,41 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_conf_bus_power(host); - res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); - if (!res) { - dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); - ret = -ENXIO; - goto err_irq; - } - tx_req = res->start; + if (!pdev->dev.of_node) { + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); + if (!res) { + dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); + ret = -ENXIO; + goto err_irq; + } + tx_req = res->start; - res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); - if (!res) { - dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); - ret = -ENXIO; - goto err_irq; + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); + if (!res) { + dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); + ret = -ENXIO; + goto err_irq; + } + rx_req = res->start; } - rx_req = res->start; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); + host->rx_chan = + dma_request_slave_channel_compat(mask, omap_dma_filter_fn, + &rx_req, &pdev->dev, "rx"); + if (!host->rx_chan) { dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); ret = -ENXIO; goto err_irq; } - host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); + host->tx_chan = + dma_request_slave_channel_compat(mask, omap_dma_filter_fn, + &tx_req, &pdev->dev, "tx"); + if (!host->tx_chan) { dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); ret = -ENXIO; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 7bcf74b1a5cd..706d9cb1a49e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -87,6 +87,12 @@ static const struct sdhci_ops sdhci_acpi_ops_dflt = { .enable_dma = sdhci_acpi_enable_dma, }; +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, + .caps2 = MMC_CAP2_HC_ERASE_SZ, + .flags = SDHCI_ACPI_RUNTIME_PM, +}; + static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, @@ -94,23 +100,67 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { .pm_caps = MMC_PM_KEEP_POWER, }; +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { +}; + +struct sdhci_acpi_uid_slot { + const char *hid; + const char *uid; + const struct 
sdhci_acpi_slot *slot; +}; + +static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { + { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc }, + { "80860F14" , "3" , &sdhci_acpi_slot_int_sd }, + { "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio }, + { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, + { "PNP0D40" }, + { }, +}; + static const struct acpi_device_id sdhci_acpi_ids[] = { - { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio }, - { "PNP0D40" }, + { "80860F14" }, + { "INT33BB" }, + { "INT33C6" }, + { "PNP0D40" }, { }, }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); -static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid) +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid, + const char *uid) { - const struct acpi_device_id *id; - - for (id = sdhci_acpi_ids; id->id[0]; id++) - if (!strcmp(id->id, hid)) - return (const struct sdhci_acpi_slot *)id->driver_data; + const struct sdhci_acpi_uid_slot *u; + + for (u = sdhci_acpi_uids; u->hid; u++) { + if (strcmp(u->hid, hid)) + continue; + if (!u->uid) + return u->slot; + if (uid && !strcmp(u->uid, uid)) + return u->slot; + } return NULL; } +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle, + const char *hid) +{ + const struct sdhci_acpi_slot *slot; + struct acpi_device_info *info; + const char *uid = NULL; + acpi_status status; + + status = acpi_get_object_info(handle, &info); + if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID)) + uid = info->unique_id.string; + + slot = sdhci_acpi_get_slot_by_ids(hid, uid); + + kfree(info); + return slot; +} + static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -148,7 +198,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) c = sdhci_priv(host); c->host = host; - c->slot = sdhci_acpi_get_slot(hid); + c->slot = sdhci_acpi_get_slot(handle, hid); c->pdev = pdev; c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); @@ -202,6 +252,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) goto err_free; if (c->use_runtime_pm) { + pm_runtime_set_active(dev); pm_suspend_ignore_children(dev, 1); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_use_autosuspend(dev); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 67d6dde2ff19..d5f0d59e1310 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -85,6 +85,12 @@ struct pltfm_imx_data { struct clk *clk_ipg; struct clk *clk_ahb; struct clk *clk_per; + enum { + NO_CMD_PENDING, /* no multiblock command pending*/ + MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ + WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ + } multiblock_status; + }; static struct platform_device_id imx_esdhc_devtype[] = { @@ -154,6 +160,8 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i static u32 esdhc_readl_le(struct sdhci_host *host, int reg) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; u32 val = readl(host->ioaddr + reg); if (unlikely(reg == SDHCI_CAPABILITIES)) { @@ -175,6 +183,18 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; val |= SDHCI_INT_ADMA_ERROR; } + + /* + * mask off the interrupt we get in response to the manually + * sent CMD12 + */ + if ((imx_data->multiblock_status == WAIT_FOR_INT) && + ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) { + val &= ~SDHCI_INT_RESPONSE; + 
writel(SDHCI_INT_RESPONSE, host->ioaddr + + SDHCI_INT_STATUS); + imx_data->multiblock_status = NO_CMD_PENDING; + } } return val; @@ -211,6 +231,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); + + if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) + { + /* send a manual CMD12 with RESPTYP=none */ + data = MMC_STOP_TRANSMISSION << 24 | + SDHCI_CMD_ABORTCMD << 16; + writel(data, host->ioaddr + SDHCI_TRANSFER_MODE); + imx_data->multiblock_status = WAIT_FOR_INT; + } } if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { @@ -277,11 +306,13 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) } return; case SDHCI_COMMAND: - if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || - host->cmd->opcode == MMC_SET_BLOCK_COUNT) && - (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + if (host->cmd->opcode == MMC_STOP_TRANSMISSION) val |= SDHCI_CMD_ABORTCMD; + if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && + (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + imx_data->multiblock_status = MULTIBLK_IN_PROCESS; + if (is_imx6q_usdhc(imx_data)) writel(val << 16, host->ioaddr + SDHCI_TRANSFER_MODE); @@ -324,8 +355,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) /* * Do not touch buswidth bits here. This is done in * esdhc_pltfm_bus_width. + * Do not touch the D3CD bit either which is used for the + * SDIO interrupt errata workaround. */ - mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK; + mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD); esdhc_clrset_le(host, mask, new_val, reg); return; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 0012d3fdc999..701d06d0e1fb 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -33,6 +33,9 @@ */ #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a +#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 +#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 +#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 /* * PCI registers @@ -304,6 +307,33 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { .probe_slot = pch_hc_probe_slot, }; +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; + slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; + return 0; +} + +static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; + return 0; +} + +static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { + .allow_runtime_pm = true, + .probe_slot = byt_emmc_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, + .allow_runtime_pm = true, + .probe_slot = byt_sdio_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { +}; + /* O2Micro extra registers */ #define O2_SD_LOCK_WP 0xD3 #define O2_SD_MULTI_VCC3V 0xEE @@ -856,6 +886,30 @@ static const struct pci_device_id pci_ids[] = { }, { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BYT_EMMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BYT_SDIO, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = 
(kernel_ulong_t)&sdhci_intel_byt_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BYT_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, + }, + + { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, .subvendor = PCI_ANY_ID, diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index a94facb46e5c..fd1df5e13ae4 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -672,11 +672,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) } rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (rc == NULL) { - dev_err(&pdev->dev, "No memory resource found for device!\r\n"); - return -ENXIO; - } - host->io_base = devm_ioremap_resource(&pdev->dev, rc); if (IS_ERR(host->io_base)) return PTR_ERR(host->io_base); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index fc58d118d844..390061d09693 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2360,14 +2360,15 @@ int bond_3ad_set_carrier(struct bonding *bond) } /** - * bond_3ad_get_active_agg_info - get information of the active aggregator + * __bond_3ad_get_active_agg_info - get information of the active aggregator * @bond: bonding struct to work on * @ad_info: ad_info struct to fill with the bond's info * * Returns: 0 on success * < 0 on error */ -int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) +int __bond_3ad_get_active_agg_info(struct bonding *bond, + struct ad_info *ad_info) { struct aggregator *aggregator = NULL; struct port *port; @@ -2391,6 +2392,18 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) return -1; } +/* Wrapper used to hold bond->lock so no slave manipulation can occur */ +int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) +{ + int ret; + + read_lock(&bond->lock); + ret = __bond_3ad_get_active_agg_info(bond, ad_info); + read_unlock(&bond->lock); + + return ret; +} + int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) { struct slave *slave, *start_at; @@ -2402,8 +2415,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) struct ad_info ad_info; int res = 1; - if (bond_3ad_get_active_agg_info(bond, &ad_info)) { - pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", + if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { + pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n", dev->name); goto out; } diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 0cfaa4afdece..5d91ad0cc041 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -273,6 +273,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave); void bond_3ad_adapter_duplex_changed(struct slave *slave); void bond_3ad_handle_link_change(struct slave *slave, char link); int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); +int __bond_3ad_get_active_agg_info(struct bonding *bond, + struct ad_info *ad_info); int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d0aade04e49a..29b846cbfb48 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1362,6 +1362,7 @@ static netdev_features_t 
bond_fix_features(struct net_device *dev, slave->dev->features, mask); } + features = netdev_add_tso_features(features, mask); out: read_unlock(&bond->lock); @@ -2555,8 +2556,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ { struct sk_buff *skb; - pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op, - slave_dev->name, dest_ip, src_ip, vlan_id); + pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op, + slave_dev->name, &dest_ip, &src_ip, vlan_id); skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, NULL, slave_dev->dev_addr, NULL); @@ -2588,7 +2589,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) __be32 addr; if (!targets[i]) break; - pr_debug("basa: target %x\n", targets[i]); + pr_debug("basa: target %pI4\n", &targets[i]); if (!bond_vlan_used(bond)) { pr_debug("basa: empty vlan: arp_send\n"); addr = bond_confirm_addr(bond->dev, targets[i], 0); @@ -4470,7 +4471,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl) static int bond_check_params(struct bond_params *params) { - int arp_validate_value, fail_over_mac_value, primary_reselect_value; + int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; /* * Convert string parameters. @@ -4650,19 +4651,18 @@ static int bond_check_params(struct bond_params *params) arp_interval = BOND_LINK_ARP_INTERV; } - for (arp_ip_count = 0; - (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count]; - arp_ip_count++) { + for (arp_ip_count = 0, i = 0; + (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { /* not complete check, but should be good enough to catch mistakes */ - __be32 ip = in_aton(arp_ip_target[arp_ip_count]); - if (!isdigit(arp_ip_target[arp_ip_count][0]) || - ip == 0 || ip == htonl(INADDR_BROADCAST)) { + __be32 ip = in_aton(arp_ip_target[i]); + if (!isdigit(arp_ip_target[i][0]) || ip == 0 || + ip == htonl(INADDR_BROADCAST)) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", - arp_ip_target[arp_ip_count]); + arp_ip_target[i]); arp_interval = 0; } else { - arp_target[arp_ip_count] = ip; + arp_target[arp_ip_count++] = ip; } } @@ -4696,8 +4696,6 @@ static int bond_check_params(struct bond_params *params) if (miimon) { pr_info("MII link monitoring set to %d ms\n", miimon); } else if (arp_interval) { - int i; - pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):", arp_interval, arp_validate_tbl[arp_validate_value].modename, diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 94d06f1307b8..4060d41f0ee7 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -130,7 +130,7 @@ static void bond_info_show_master(struct seq_file *seq) seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", ad_select_tbl[bond->params.ad_select].modename); - if (bond_3ad_get_active_agg_info(bond, &ad_info)) { + if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { seq_printf(seq, "bond %s has no active aggregator\n", bond->dev->name); } else { diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index ea7a388f4843..d7434e0a610e 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -316,6 +316,9 @@ static ssize_t bonding_store_mode(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); + if (bond->dev->flags & IFF_UP) { 
pr_err("unable to update mode of %s because interface is up.\n", bond->dev->name); @@ -352,6 +355,7 @@ static ssize_t bonding_store_mode(struct device *d, bond->dev->name, bond_mode_tbl[new_value].modename, new_value); out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, @@ -1315,7 +1319,6 @@ static ssize_t bonding_show_mii_status(struct device *d, } static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); - /* * Show current 802.3ad aggregator ID. */ @@ -1329,7 +1332,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.aggregator_id); } @@ -1351,7 +1354,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.ports); } @@ -1373,7 +1376,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.actor_key); } @@ -1395,7 +1398,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.partner_key); } diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index 7ffc756131a2..547098086773 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -43,7 +43,7 @@ config CAIF_HSI config CAIF_VIRTIO tristate "CAIF virtio transport driver" - depends on CAIF + depends on CAIF && HAS_DMA select VHOST_RING select VIRTIO select GENERIC_ALLOCATOR diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 9b74d1e3ad44..6aa7b3266c80 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -612,9 +612,15 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv) { struct esd_usb2 *dev = priv->usb2; struct net_device *netdev = priv->netdev; - struct esd_usb2_msg msg; + struct esd_usb2_msg *msg; int err, i; + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) { + err = -ENOMEM; + goto out; + } + /* * Enable all IDs * The IDADD message takes up to 64 32 bit bitmasks (2048 bits). @@ -628,33 +634,32 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv) * the number of the starting bitmask (0..64) to the filter.option * field followed by only some bitmasks. 
*/ - msg.msg.hdr.cmd = CMD_IDADD; - msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; - msg.msg.filter.net = priv->index; - msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ + msg->msg.hdr.cmd = CMD_IDADD; + msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; + msg->msg.filter.net = priv->index; + msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ for (i = 0; i < ESD_MAX_ID_SEGMENT; i++) - msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff); + msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff); /* enable 29bit extended IDs */ - msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); + msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); - err = esd_usb2_send_msg(dev, &msg); + err = esd_usb2_send_msg(dev, msg); if (err) - goto failed; + goto out; err = esd_usb2_setup_rx_urbs(dev); if (err) - goto failed; + goto out; priv->can.state = CAN_STATE_ERROR_ACTIVE; - return 0; - -failed: +out: if (err == -ENODEV) netif_device_detach(netdev); + if (err) + netdev_err(netdev, "couldn't start device: %d\n", err); - netdev_err(netdev, "couldn't start device: %d\n", err); - + kfree(msg); return err; } @@ -833,26 +838,30 @@ nourbmem: static int esd_usb2_close(struct net_device *netdev) { struct esd_usb2_net_priv *priv = netdev_priv(netdev); - struct esd_usb2_msg msg; + struct esd_usb2_msg *msg; int i; + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + /* Disable all IDs (see esd_usb2_start()) */ - msg.msg.hdr.cmd = CMD_IDADD; - msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; - msg.msg.filter.net = priv->index; - msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ + msg->msg.hdr.cmd = CMD_IDADD; + msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; + msg->msg.filter.net = priv->index; + msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++) - msg.msg.filter.mask[i] = 0; - if (esd_usb2_send_msg(priv->usb2, &msg) < 0) + msg->msg.filter.mask[i] = 0; + if (esd_usb2_send_msg(priv->usb2, msg) < 0) netdev_err(netdev, "sending idadd message failed\n"); /* set CAN controller to reset mode */ - msg.msg.hdr.len = 2; - msg.msg.hdr.cmd = CMD_SETBAUD; - msg.msg.setbaud.net = priv->index; - msg.msg.setbaud.rsvd = 0; - msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); - if (esd_usb2_send_msg(priv->usb2, &msg) < 0) + msg->msg.hdr.len = 2; + msg->msg.hdr.cmd = CMD_SETBAUD; + msg->msg.setbaud.net = priv->index; + msg->msg.setbaud.rsvd = 0; + msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); + if (esd_usb2_send_msg(priv->usb2, msg) < 0) netdev_err(netdev, "sending setbaud message failed\n"); priv->can.state = CAN_STATE_STOPPED; @@ -861,6 +870,8 @@ static int esd_usb2_close(struct net_device *netdev) close_candev(netdev); + kfree(msg); + return 0; } @@ -886,7 +897,8 @@ static int esd_usb2_set_bittiming(struct net_device *netdev) { struct esd_usb2_net_priv *priv = netdev_priv(netdev); struct can_bittiming *bt = &priv->can.bittiming; - struct esd_usb2_msg msg; + struct esd_usb2_msg *msg; + int err; u32 canbtr; int sjw_shift; @@ -912,15 +924,22 @@ static int esd_usb2_set_bittiming(struct net_device *netdev) if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) canbtr |= ESD_USB2_3_SAMPLES; - msg.msg.hdr.len = 2; - msg.msg.hdr.cmd = CMD_SETBAUD; - msg.msg.setbaud.net = priv->index; - msg.msg.setbaud.rsvd = 0; - msg.msg.setbaud.baud = cpu_to_le32(canbtr); + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->msg.hdr.len = 2; + msg->msg.hdr.cmd = CMD_SETBAUD; + msg->msg.setbaud.net 
= priv->index; + msg->msg.setbaud.rsvd = 0; + msg->msg.setbaud.baud = cpu_to_le32(canbtr); netdev_info(netdev, "setting BTR=%#x\n", canbtr); - return esd_usb2_send_msg(priv->usb2, &msg); + err = esd_usb2_send_msg(priv->usb2, msg); + + kfree(msg); + return err; } static int esd_usb2_get_berr_counter(const struct net_device *netdev, @@ -1022,7 +1041,7 @@ static int esd_usb2_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct esd_usb2 *dev; - struct esd_usb2_msg msg; + struct esd_usb2_msg *msg; int i, err; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -1037,27 +1056,33 @@ static int esd_usb2_probe(struct usb_interface *intf, usb_set_intfdata(intf, dev); + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) { + err = -ENOMEM; + goto free_msg; + } + /* query number of CAN interfaces (nets) */ - msg.msg.hdr.cmd = CMD_VERSION; - msg.msg.hdr.len = 2; - msg.msg.version.rsvd = 0; - msg.msg.version.flags = 0; - msg.msg.version.drv_version = 0; + msg->msg.hdr.cmd = CMD_VERSION; + msg->msg.hdr.len = 2; + msg->msg.version.rsvd = 0; + msg->msg.version.flags = 0; + msg->msg.version.drv_version = 0; - err = esd_usb2_send_msg(dev, &msg); + err = esd_usb2_send_msg(dev, msg); if (err < 0) { dev_err(&intf->dev, "sending version message failed\n"); - goto free_dev; + goto free_msg; } - err = esd_usb2_wait_msg(dev, &msg); + err = esd_usb2_wait_msg(dev, msg); if (err < 0) { dev_err(&intf->dev, "no version message answer\n"); - goto free_dev; + goto free_msg; } - dev->net_count = (int)msg.msg.version_reply.nets; - dev->version = le32_to_cpu(msg.msg.version_reply.version); + dev->net_count = (int)msg->msg.version_reply.nets; + dev->version = le32_to_cpu(msg->msg.version_reply.version); if (device_create_file(&intf->dev, &dev_attr_firmware)) dev_err(&intf->dev, @@ -1075,10 +1100,10 @@ static int esd_usb2_probe(struct usb_interface *intf, for (i = 0; i < dev->net_count; i++) esd_usb2_probe_one_net(intf, i); - return 0; - -free_dev: - kfree(dev); +free_msg: + kfree(msg); + if (err) + kfree(dev); done: return err; } diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 45cb9f3c1324..3b9546588240 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -136,6 +136,9 @@ #define KVASER_CTRL_MODE_SELFRECEPTION 3 #define KVASER_CTRL_MODE_OFF 4 +/* log message */ +#define KVASER_EXTENDED_FRAME BIT(31) + struct kvaser_msg_simple { u8 tid; u8 channel; @@ -817,8 +820,13 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, priv = dev->nets[channel]; stats = &priv->netdev->stats; - if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR | - MSG_FLAG_OVERRUN)) { + if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) && + (msg->id == CMD_LOG_MESSAGE)) { + kvaser_usb_rx_error(dev, msg); + return; + } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | + MSG_FLAG_NERR | + MSG_FLAG_OVERRUN)) { kvaser_usb_rx_can_err(priv, msg); return; } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) { @@ -834,22 +842,40 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, return; } - cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | - (msg->u.rx_can.msg[1] & 0x3f); - cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); + if (msg->id == CMD_LOG_MESSAGE) { + cf->can_id = le32_to_cpu(msg->u.log_message.id); + if (cf->can_id & KVASER_EXTENDED_FRAME) + cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; + else + cf->can_id &= CAN_SFF_MASK; - if (msg->id == CMD_RX_EXT_MESSAGE) { - cf->can_id <<= 18; - cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) 
<< 14) | - ((msg->u.rx_can.msg[3] & 0xff) << 6) | - (msg->u.rx_can.msg[4] & 0x3f); - cf->can_id |= CAN_EFF_FLAG; - } + cf->can_dlc = get_can_dlc(msg->u.log_message.dlc); - if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) - cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc); + if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, &msg->u.log_message.data, + cf->can_dlc); + } else { + cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | + (msg->u.rx_can.msg[1] & 0x3f); + + if (msg->id == CMD_RX_EXT_MESSAGE) { + cf->can_id <<= 18; + cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) | + ((msg->u.rx_can.msg[3] & 0xff) << 6) | + (msg->u.rx_can.msg[4] & 0x3f); + cf->can_id |= CAN_EFF_FLAG; + } + + cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); + + if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, &msg->u.rx_can.msg[6], + cf->can_dlc); + } netif_rx(skb); @@ -911,6 +937,7 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, case CMD_RX_STD_MESSAGE: case CMD_RX_EXT_MESSAGE: + case CMD_LOG_MESSAGE: kvaser_usb_rx_can_msg(dev, msg); break; @@ -919,11 +946,6 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, kvaser_usb_rx_error(dev, msg); break; - case CMD_LOG_MESSAGE: - if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME) - kvaser_usb_rx_error(dev, msg); - break; - case CMD_TX_ACKNOWLEDGE: kvaser_usb_tx_acknowledge(dev, msg); break; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 30d79bfa5b10..8ee9d1556e6e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -504,15 +504,24 @@ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev, return usb_submit_urb(urb, GFP_ATOMIC); } -static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) +static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) { - u8 buffer[16]; + u8 *buffer; + int err; + + buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; buffer[0] = 0; buffer[1] = !!loaded; - pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, - PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer)); + err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, + PCAN_USBPRO_FCT_DRVLD, buffer, + PCAN_USBPRO_FCT_DRVLD_REQ_LEN); + kfree(buffer); + + return err; } static inline @@ -851,21 +860,24 @@ static int pcan_usb_pro_stop(struct peak_usb_device *dev) */ static int pcan_usb_pro_init(struct peak_usb_device *dev) { - struct pcan_usb_pro_interface *usb_if; struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); + struct pcan_usb_pro_interface *usb_if = NULL; + struct pcan_usb_pro_fwinfo *fi = NULL; + struct pcan_usb_pro_blinfo *bi = NULL; + int err; /* do this for 1st channel only */ if (!dev->prev_siblings) { - struct pcan_usb_pro_fwinfo fi; - struct pcan_usb_pro_blinfo bi; - int err; - /* allocate netdevices common structure attached to first one */ usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface), GFP_KERNEL); - if (!usb_if) - return -ENOMEM; + fi = kmalloc(sizeof(struct pcan_usb_pro_fwinfo), GFP_KERNEL); + bi = kmalloc(sizeof(struct pcan_usb_pro_blinfo), GFP_KERNEL); + if (!usb_if || !fi || !bi) { + err = -ENOMEM; + goto err_out; + } /* number of ts msgs to ignore before taking one into account */ usb_if->cm_ignore_count = 5; @@ -877,34 +889,34 @@ static int 
pcan_usb_pro_init(struct peak_usb_device *dev) */ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_FW, - &fi, sizeof(fi)); + fi, sizeof(*fi)); if (err) { - kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", pcan_usb_pro.name, err); - return err; + goto err_out; } err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_BL, - &bi, sizeof(bi)); + bi, sizeof(*bi)); if (err) { - kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s bootloader info (err %d)\n", pcan_usb_pro.name, err); - return err; + goto err_out; } + /* tell the device the can driver is running */ + err = pcan_usb_pro_drv_loaded(dev, 1); + if (err) + goto err_out; + dev_info(dev->netdev->dev.parent, "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n", pcan_usb_pro.name, - bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo, + bi->hw_rev, bi->serial_num_hi, bi->serial_num_lo, pcan_usb_pro.ctrl_count); - - /* tell the device the can driver is running */ - pcan_usb_pro_drv_loaded(dev, 1); } else { usb_if = pcan_usb_pro_dev_if(dev->prev_siblings); } @@ -916,6 +928,13 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) pcan_usb_pro_set_led(dev, 0, 1); return 0; + + err_out: + kfree(bi); + kfree(fi); + kfree(usb_if); + + return err; } static void pcan_usb_pro_exit(struct peak_usb_device *dev) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h index a869918c5620..32275af547e0 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h @@ -29,6 +29,7 @@ /* Vendor Request value for XXX_FCT */ #define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */ +#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN 16 /* PCAN_USBPRO_INFO_BL vendor request record type */ struct __packed pcan_usb_pro_blinfo { diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index de570a8f8967..072c6f14e8fc 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -632,7 +632,6 @@ struct vortex_private { pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ open:1, medialock:1, - must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ /* {get|set}_wol operations are already serialized by rtnl. @@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev, if (rc < 0) goto out; + rc = pci_request_regions(pdev, DRV_NAME); + if (rc < 0) { + pci_disable_device(pdev); + goto out; + } + unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { @@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... 
*/ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { + pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; @@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); + pci_release_regions(pdev); pci_disable_device(pdev); goto out; } @@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, /* PCI-only startup logic */ if (pdev) { - /* EISA resources already marked, so only PCI needs to do this here */ - /* Ignore return value, because Cardbus drivers already allocate for us */ - if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) - vp->must_free_region = 1; - /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); @@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) - goto free_region; + goto free_device; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; @@ -1484,9 +1486,7 @@ free_ring: + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); -free_region: - if (vp->must_free_region) - release_region(dev->base_addr, vci->io_size); +free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); out: @@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev) + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); - if (vp->must_free_region) - release_region(dev->base_addr, vp->io_size); + + pci_release_regions(pdev); + free_netdev(dev); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b8fbe266ab68..638e55435b04 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3192,11 +3192,11 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) rc |= XMIT_CSUM_TCP; if (skb_is_gso_v6(skb)) { - rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); if (rc & XMIT_CSUM_ENC) rc |= XMIT_GSO_ENC_V6; } else if (skb_is_gso(skb)) { - rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); + rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); if (rc & XMIT_CSUM_ENC) rc |= XMIT_GSO_ENC_V4; } @@ -3313,6 +3313,7 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, */ static void bnx2x_set_pbd_gso(struct sk_buff *skb, struct eth_tx_parse_bd_e1x *pbd, + struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); @@ -3326,11 +3327,14 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - } else + /* GSO on 57710/57711 needs FW to calculate IP checksum */ + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; + } else { pbd->tcp_pseudo_csum = bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); + } pbd->global_data |= cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); @@ -3479,19 +3483,18 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, { u16 hlen_w = 0; u8 outerip_off, outerip_len = 0; + /* from outer IP to transport */ hlen_w = (skb_inner_transport_header(skb) - skb_network_header(skb)) >> 1; /* transport len */ - if (xmit_type & XMIT_CSUM_TCP) - hlen_w += inner_tcp_hdrlen(skb) >> 1; - else - hlen_w += 
sizeof(struct udphdr) >> 1; + hlen_w += inner_tcp_hdrlen(skb) >> 1; pbd2->fw_ip_hdr_to_payload_w = hlen_w; - if (xmit_type & XMIT_CSUM_ENC_V4) { + /* outer IP header info */ + if (xmit_type & XMIT_CSUM_V4) { struct iphdr *iph = ip_hdr(skb); pbd2->fw_ip_csum_wo_len_flags_frag = bswab16(csum_fold((~iph->check) - @@ -3814,7 +3817,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, xmit_type); else - bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); + bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type); } /* Set the PBD's parsing_data field if not zero diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 728d42ab2a76..0f493c8dc28b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 131 +#define TG3_MIN_NUM 132 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "April 09, 2013" +#define DRV_MODULE_RELDATE "May 21, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -2957,6 +2957,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) return 0; } +static bool tg3_phy_power_bug(struct tg3 *tp) +{ + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5700: + case ASIC_REV_5704: + return true; + case ASIC_REV_5780: + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + return true; + return false; + case ASIC_REV_5717: + if (!tp->pci_fn) + return true; + return false; + case ASIC_REV_5719: + case ASIC_REV_5720: + if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tp->pci_fn) + return true; + return false; + } + + return false; +} + static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -3016,12 +3041,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) /* The PHY should not be powered down on some chips because * of bugs. 
*/ - if (tg3_asic_rev(tp) == ASIC_REV_5700 || - tg3_asic_rev(tp) == ASIC_REV_5704 || - (tg3_asic_rev(tp) == ASIC_REV_5780 && - (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) || - (tg3_asic_rev(tp) == ASIC_REV_5717 && - !tp->pci_fn)) + if (tg3_phy_power_bug(tp)) return; if (tg3_chip_rev(tp) == CHIPREV_5784_AX || @@ -7428,6 +7448,20 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) return (base > 0xffffdcc0) && (base + len + 8 < base); } +/* Test for TSO DMA buffers that cross into regions which are within MSS bytes + * of any 4GB boundaries: 4G, 8G, etc + */ +static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, + u32 len, u32 mss) +{ + if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { + u32 base = (u32) mapping & 0xffffffff; + + return ((base + len + (mss & 0x3fff)) < base); + } + return 0; +} + /* Test for DMA addresses > 40-bit */ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, int len) @@ -7464,6 +7498,9 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, if (tg3_4g_overflow_test(map, len)) hwbug = true; + if (tg3_4g_tso_overflow_test(tp, map, len, mss)) + hwbug = true; + if (tg3_40bit_overflow_test(tp, map, len)) hwbug = true; @@ -8874,6 +8911,10 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_halt_cpu(tp, RX_CPU_BASE); } + err = tg3_poll_fw(tp); + if (err) + return err; + tw32(GRC_MODE, tp->grc_mode); if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { @@ -8904,10 +8945,6 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); - err = tg3_poll_fw(tp); - if (err) - return err; - tg3_mdio_start(tp); if (tg3_flag(tp, PCI_EXPRESS) && @@ -9431,6 +9468,14 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp) } } +static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) +{ + if (tg3_asic_rev(tp) == ASIC_REV_5719) + return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; + else + return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; +} + /* tp->lock is held. 
*/ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) { @@ -10116,16 +10161,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) tw32_f(RDMAC_MODE, rdmac_mode); udelay(40); - if (tg3_asic_rev(tp) == ASIC_REV_5719) { + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) break; } if (i < TG3_NUM_RDMA_CHANNELS) { val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); - val |= TG3_LSO_RD_DMA_TX_LENGTH_WA; + val |= tg3_lso_rd_dma_workaround_bit(tp); tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); - tg3_flag_set(tp, 5719_RDMA_BUG); + tg3_flag_set(tp, 5719_5720_RDMA_BUG); } } @@ -10489,15 +10535,15 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); - if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) && + if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { u32 val; val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); - val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA; + val &= ~tg3_lso_rd_dma_workaround_bit(tp); tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); - tg3_flag_clear(tp, 5719_RDMA_BUG); + tg3_flag_clear(tp, 5719_5720_RDMA_BUG); } TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 9b2d3ac2474a..ff6e30eeae35 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1422,7 +1422,8 @@ #define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 -#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000 +#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000 +#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000 /* 0x4914 --> 0x4be0 unused */ #define TG3_NUM_RDMA_CHANNELS 4 @@ -3059,7 +3060,7 @@ enum TG3_FLAGS { TG3_FLAG_APE_HAS_NCSI, TG3_FLAG_TX_TSTAMP_EN, TG3_FLAG_4K_FIFO_LIMIT, - TG3_FLAG_5719_RDMA_BUG, + TG3_FLAG_5719_5720_RDMA_BUG, TG3_FLAG_RESET_TASK_PENDING, TG3_FLAG_PTP_CAPABLE, TG3_FLAG_5705_PLUS, diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ce4a030d3d0c..07f7ef05c3f2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad, sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); bnad->work_q = create_singlethread_workqueue(bnad->wq_name); - - if (!bnad->work_q) + if (!bnad->work_q) { + iounmap(bnad->bar0); return -ENOMEM; + } return 0; } diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 1194446f859a..768285ec10f4 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -22,7 +22,7 @@ if NET_CADENCE config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" - depends on GENERIC_HARDIRQS + depends on GENERIC_HARDIRQS && HAS_DMA select NET_CORE select MACB ---help--- @@ -31,6 +31,7 @@ config ARM_AT91_ETHER config MACB tristate "Cadence MACB/GEM support" + depends on HAS_DMA select PHYLIB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 6be513deb17f..c89aa41dd448 
100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -485,7 +485,8 @@ static void macb_tx_interrupt(struct macb *bp) status = macb_readl(bp, TSR); macb_writel(bp, TSR, status); - macb_writel(bp, ISR, MACB_BIT(TCOMP)); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(TCOMP)); netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", (unsigned long)status); @@ -738,7 +739,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) * now. */ macb_writel(bp, IDR, MACB_RX_INT_FLAGS); - macb_writel(bp, ISR, MACB_BIT(RCOMP)); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RCOMP)); if (napi_schedule_prep(&bp->napi)) { netdev_vdbg(bp->dev, "scheduling RX softirq\n"); @@ -1062,6 +1064,17 @@ static void macb_configure_dma(struct macb *bp) } } +/* + * Configure peripheral capacities according to integration options used + */ +static void macb_configure_caps(struct macb *bp) +{ + if (macb_is_gem(bp)) { + if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0) + bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; + } +} + static void macb_init_hw(struct macb *bp) { u32 config; @@ -1084,6 +1097,7 @@ static void macb_init_hw(struct macb *bp) bp->duplex = DUPLEX_HALF; macb_configure_dma(bp); + macb_configure_caps(bp); /* Initialize TX and RX buffers */ macb_writel(bp, RBQP, bp->rx_ring_dma); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 993d70380688..548c0ecae869 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -300,6 +300,8 @@ #define MACB_REV_SIZE 16 /* Bitfields in DCFG1. */ +#define GEM_IRQCOR_OFFSET 23 +#define GEM_IRQCOR_SIZE 1 #define GEM_DBWDEF_OFFSET 25 #define GEM_DBWDEF_SIZE 3 @@ -323,6 +325,9 @@ #define MACB_MAN_READ 2 #define MACB_MAN_CODE 2 +/* Capability mask bits */ +#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x1 + /* Bit manipulation macros */ #define MACB_BIT(name) \ (1 << MACB_##name##_OFFSET) @@ -574,6 +579,8 @@ struct macb { unsigned int speed; unsigned int duplex; + u32 caps; + phy_interface_t phy_interface; /* AT91RM9200 transmit */ diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index aba435c3d4ae..184a063bed5f 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig @@ -1,6 +1,6 @@ config NET_CALXEDA_XGMAC tristate "Calxeda 1G/10G XGMAC Ethernet driver" - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA select CRC32 help This is the driver for the XGMAC Ethernet IP block found on Calxeda diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index f544b297c9ab..0a510684e468 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -262,6 +262,7 @@ struct be_rx_compl_info { u8 ipv6; u8 vtm; u8 pkt_type; + u8 ip_frag; }; struct be_rx_obj { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index fd7b547698ab..1db2df61b8af 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -562,7 +562,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter) resource_error = lancer_provisioning_error(adapter); if (resource_error) - return -1; + return -EAGAIN; status = lancer_wait_ready(adapter); if (!status) { @@ -590,8 +590,8 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter) * when PF provisions resources. 
*/ resource_error = lancer_provisioning_error(adapter); - if (status == -1 && !resource_error) - adapter->eeh_error = true; + if (resource_error) + status = -EAGAIN; return status; } @@ -2976,22 +2976,17 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count, for (i = 0; i < desc_count; i++) { desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE; if (((void *)desc + desc->desc_len) > - (void *)(buf + max_buf_size)) { - desc = NULL; - break; - } + (void *)(buf + max_buf_size)) + return NULL; if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1) - break; + return desc; desc = (void *)desc + desc->desc_len; } - if (!desc || i == MAX_RESOURCE_DESC) - return NULL; - - return desc; + return NULL; } /* Uses Mbox */ diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3c1099b47f2a..8780183c6d1c 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -356,7 +356,7 @@ struct amap_eth_rx_compl_v0 { u8 ip_version; /* dword 1 */ u8 macdst[6]; /* dword 1 */ u8 vtp; /* dword 1 */ - u8 rsvd0; /* dword 1 */ + u8 ip_frag; /* dword 1 */ u8 fragndx[10]; /* dword 1 */ u8 ct[2]; /* dword 1 */ u8 sw; /* dword 1 */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a444110b060f..8bc1b21b1c79 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -780,26 +780,18 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (unlikely(!skb)) return skb; - if (vlan_tx_tag_present(skb)) { + if (vlan_tx_tag_present(skb)) vlan_tag = be_get_tx_vlan_tag(adapter, skb); - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (skb) - skb->vlan_tci = 0; - } - - if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { - if (!vlan_tag) - vlan_tag = adapter->pvid; - if (skip_hw_vlan) - *skip_hw_vlan = true; - } + else if (qnq_async_evt_rcvd(adapter) && adapter->pvid) + vlan_tag = adapter->pvid; if (vlan_tag) { skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); if (unlikely(!skb)) return skb; - skb->vlan_tci = 0; + if (skip_hw_vlan) + *skip_hw_vlan = true; } /* Insert the outer VLAN, if any */ @@ -1607,6 +1599,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, compl); } rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); + rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, + ip_frag, compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) @@ -1628,6 +1622,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) else be_parse_rx_compl_v0(compl, rxcp); + if (rxcp->ip_frag) + rxcp->l4_csum = 0; + if (rxcp->vlanf) { /* vlanf could be wrongly set in some cards. * ignore if vtm is not set */ @@ -2176,7 +2173,7 @@ static irqreturn_t be_msix(int irq, void *dev) static inline bool do_gro(struct be_rx_compl_info *rxcp) { - return (rxcp->tcpf && !rxcp->err) ? true : false; + return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? 
true : false; } static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, @@ -4101,6 +4098,7 @@ static int be_get_initial_config(struct be_adapter *adapter) static int lancer_recover_func(struct be_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; int status; status = lancer_test_and_set_rdy_state(adapter); @@ -4112,8 +4110,7 @@ static int lancer_recover_func(struct be_adapter *adapter) be_clear(adapter); - adapter->hw_error = false; - adapter->fw_timeout = false; + be_clear_all_error(adapter); status = be_setup(adapter); if (status) @@ -4125,13 +4122,13 @@ static int lancer_recover_func(struct be_adapter *adapter) goto err; } - dev_err(&adapter->pdev->dev, - "Adapter SLIPORT recovery succeeded\n"); + dev_err(dev, "Error recovery successful\n"); return 0; err: - if (adapter->eeh_error) - dev_err(&adapter->pdev->dev, - "Adapter SLIPORT recovery failed\n"); + if (status == -EAGAIN) + dev_err(dev, "Waiting for resource provisioning\n"); + else + dev_err(dev, "Error recovery failed\n"); return status; } @@ -4140,28 +4137,27 @@ static void be_func_recovery_task(struct work_struct *work) { struct be_adapter *adapter = container_of(work, struct be_adapter, func_recovery_work.work); - int status; + int status = 0; be_detect_error(adapter); if (adapter->hw_error && lancer_chip(adapter)) { - if (adapter->eeh_error) - goto out; - rtnl_lock(); netif_device_detach(adapter->netdev); rtnl_unlock(); status = lancer_recover_func(adapter); - if (!status) netif_device_attach(adapter->netdev); } -out: - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); + /* In Lancer, for all errors other than provisioning error (-EAGAIN), + * no need to attempt further recovery. + */ + if (!status || status == -EAGAIN) + schedule_delayed_work(&adapter->func_recovery_work, + msecs_to_jiffies(1000)); } static void be_worker(struct work_struct *work) @@ -4444,20 +4440,19 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, dev_err(&adapter->pdev->dev, "EEH error detected\n"); - adapter->eeh_error = true; - - cancel_delayed_work_sync(&adapter->func_recovery_work); + if (!adapter->eeh_error) { + adapter->eeh_error = true; - rtnl_lock(); - netif_device_detach(netdev); - rtnl_unlock(); + cancel_delayed_work_sync(&adapter->func_recovery_work); - if (netif_running(netdev)) { rtnl_lock(); - be_close(netdev); + netif_device_detach(netdev); + if (netif_running(netdev)) + be_close(netdev); rtnl_unlock(); + + be_clear(adapter); } - be_clear(adapter); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; @@ -4482,7 +4477,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) int status; dev_info(&adapter->pdev->dev, "EEH reset\n"); - be_clear_all_error(adapter); status = pci_enable_device(pdev); if (status) @@ -4500,6 +4494,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; pci_cleanup_aer_uncorrect_error_status(pdev); + be_clear_all_error(adapter); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index aff0310a778b..a667015be22a 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -87,6 +87,8 @@ #define FEC_QUIRK_HAS_GBIT (1 << 3) /* Controller has extend desc buffer */ #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) +/* Controller has hardware checksum support */ +#define FEC_QUIRK_HAS_CSUM (1 << 5) static struct platform_device_id fec_devtype[] = { { @@ 
-105,9 +107,9 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX, + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, }, { - .name = "mvf-fec", + .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, }, { /* sentinel */ @@ -120,7 +122,7 @@ enum imx_fec_type { IMX27_FEC, /* runs on i.mx27/35/51 */ IMX28_FEC, IMX6Q_FEC, - MVF_FEC, + MVF600_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -128,7 +130,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, - { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], }, + { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -449,7 +451,7 @@ fec_restart(struct net_device *ndev, int duplex) netif_device_detach(ndev); napi_disable(&fep->napi); netif_stop_queue(ndev); - netif_tx_lock(ndev); + netif_tx_lock_bh(ndev); } /* Whack a reset. We should wait for this. */ @@ -614,10 +616,10 @@ fec_restart(struct net_device *ndev, int duplex) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); if (netif_running(ndev)) { - netif_device_attach(ndev); - napi_enable(&fep->napi); + netif_tx_unlock_bh(ndev); netif_wake_queue(ndev); - netif_tx_unlock(ndev); + napi_enable(&fep->napi); + netif_device_attach(ndev); } } @@ -1036,6 +1038,18 @@ static void fec_get_mac(struct net_device *ndev) iap = &tmpaddr[0]; } + /* + * 5) random mac address + */ + if (!is_valid_ether_addr(iap)) { + /* Report it and use a random ethernet address instead */ + netdev_err(ndev, "Invalid MAC address: %pM\n", iap); + eth_hw_addr_random(ndev); + netdev_info(ndev, "Using random MAC address: %pM\n", + ndev->dev_addr); + return; + } + memcpy(ndev->dev_addr, iap, ETH_ALEN); /* Adjust MAC if using macaddr */ @@ -1744,6 +1758,8 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); struct bufdesc *cbd_base; /* Allocate memory for buffer descriptors. 
*/ @@ -1775,12 +1791,14 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); - /* enable hw accelerator */ - ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + } fec_restart(ndev, 0); diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 576e4b858fce..083ea2b4d20a 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -524,6 +524,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) return 0; no_clock: + iounmap(etsects->regs); no_ioremap: release_resource(etsects->rsrc); no_resource: diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 4989481c19f0..d300a0c0eafc 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev) } #ifdef CONFIG_PPC_DCR_NATIVE - /* Enable internal clock source */ - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) - dcri_clrset(SDR0, SDR0_ETH_CFG, - 0, SDR0_ETH_CFG_ECS << dev->cell_index); + /* + * PPC460EX/GT Embedded Processor Advanced User's Manual + * section 28.10.1 Mode Register 0 (EMACx_MR0) states: + * Note: The PHY must provide a TX Clk in order to perform a soft reset + * of the EMAC. If none is present, select the internal clock + * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). + * After a soft reset, select the external clock. + */ + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: select internal loop clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + 0, SDR0_ETH_CFG_ECS << dev->cell_index); + } else { + /* PHY present: select external clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } #endif out_be32(&p->mr0, EMAC_MR0_SRST); @@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev) --n; #ifdef CONFIG_PPC_DCR_NATIVE - /* Enable external clock source */ - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) - dcri_clrset(SDR0, SDR0_ETH_CFG, - SDR0_ETH_CFG_ECS << dev->cell_index, 0); + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: restore external clock source after reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } #endif if (n) { diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h index 6ce027355fcf..abb300a31912 100644 --- a/drivers/net/ethernet/icplus/ipg.h +++ b/drivers/net/ethernet/icplus/ipg.h @@ -195,57 +195,57 @@ enum ipg_regs { /* TFD data structure masks. 
*/ /* TFDList, TFC */ -#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF -#define IPG_TFC_FRAMEID 0x000000000000FFFF -#define IPG_TFC_WORDALIGN 0x0000000000030000 -#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 -#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 -#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 -#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 -#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 -#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 -#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 -#define IPG_TFC_TXINDICATE 0x0000000000400000 -#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 -#define IPG_TFC_FRAGCOUNT 0x000000000F000000 -#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 -#define IPG_TFC_TFDDONE 0x0000000080000000 -#define IPG_TFC_VID 0x00000FFF00000000 -#define IPG_TFC_CFI 0x0000100000000000 -#define IPG_TFC_USERPRIORITY 0x0000E00000000000 +#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL +#define IPG_TFC_FRAMEID 0x000000000000FFFFULL +#define IPG_TFC_WORDALIGN 0x0000000000030000ULL +#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL +#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL +#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL +#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL +#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL +#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL +#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL +#define IPG_TFC_TXINDICATE 0x0000000000400000ULL +#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL +#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL +#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL +#define IPG_TFC_TFDDONE 0x0000000080000000ULL +#define IPG_TFC_VID 0x00000FFF00000000ULL +#define IPG_TFC_CFI 0x0000100000000000ULL +#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL /* TFDList, FragInfo */ -#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF -#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF -#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL +#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL +#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL +#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL /* RFD data structure masks. 
*/ /* RFDList, RFS */ -#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF -#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF -#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 -#define IPG_RFS_RXRUNTFRAME 0x0000000000020000 -#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 -#define IPG_RFS_RXFCSERROR 0x0000000000080000 -#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 -#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 -#define IPG_RFS_VLANDETECTED 0x0000000000400000 -#define IPG_RFS_TCPDETECTED 0x0000000000800000 -#define IPG_RFS_TCPERROR 0x0000000001000000 -#define IPG_RFS_UDPDETECTED 0x0000000002000000 -#define IPG_RFS_UDPERROR 0x0000000004000000 -#define IPG_RFS_IPDETECTED 0x0000000008000000 -#define IPG_RFS_IPERROR 0x0000000010000000 -#define IPG_RFS_FRAMESTART 0x0000000020000000 -#define IPG_RFS_FRAMEEND 0x0000000040000000 -#define IPG_RFS_RFDDONE 0x0000000080000000 -#define IPG_RFS_TCI 0x0000FFFF00000000 +#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL +#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL +#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL +#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL +#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL +#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL +#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL +#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL +#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL +#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL +#define IPG_RFS_TCPERROR 0x0000000001000000ULL +#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL +#define IPG_RFS_UDPERROR 0x0000000004000000ULL +#define IPG_RFS_IPDETECTED 0x0000000008000000ULL +#define IPG_RFS_IPERROR 0x0000000010000000ULL +#define IPG_RFS_FRAMESTART 0x0000000020000000ULL +#define IPG_RFS_FRAMEEND 0x0000000040000000ULL +#define IPG_RFS_RFDDONE 0x0000000080000000ULL +#define IPG_RFS_TCI 0x0000FFFF00000000ULL /* RFDList, FragInfo */ -#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF -#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF -#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL +#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL +#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL +#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL /* I/O Register masks. 
*/ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index d0afeea181fb..2ad1494efbb3 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -867,7 +867,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock_bh(nq); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { @@ -913,7 +913,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) dev_kfree_skb(skb); } - __netif_tx_unlock(nq); + __netif_tx_unlock_bh(nq); if (reclaimed < budget) mp->work_tx &= ~(1 << txq->index); @@ -2745,7 +2745,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); - netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); init_timer(&mp->rx_oom); mp->rx_oom.data = (unsigned long)mp; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 1df56cc50ee9..0e572a527154 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -222,8 +222,6 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, * FLR process. The only non-zero result in the RESET command * is MLX4_DELAY_RESET_SLAVE*/ if ((MLX4_COMM_CMD_RESET == cmd)) { - mlx4_warn(dev, "Got slave FLRed from Communication" - " channel (ret:0x%x)\n", ret_from_pending); err = MLX4_DELAY_RESET_SLAVE; } else { mlx4_warn(dev, "Communication channel timed out\n"); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index b35f94700093..89c47ea84b50 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1323,6 +1323,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) priv->last_moder_time[ring] = moder_time; cq = &priv->rx_cq[ring]; cq->moder_time = moder_time; + cq->moder_cnt = priv->rx_frames; err = mlx4_en_set_cq_moder(priv, cq); if (err) en_err(priv, "Failed modifying moderation for cq:%d\n", @@ -2118,6 +2119,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_priv *priv; int i; int err; + u64 mac_u64; dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), MAX_TX_RINGS, MAX_RX_RINGS); @@ -2191,10 +2193,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { - en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", - priv->port, dev->dev_addr); - err = -EINVAL; - goto out; + if (mlx4_is_slave(priv->mdev->dev)) { + eth_hw_addr_random(dev); + en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); + mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr); + mdev->dev->caps.def_mac[priv->port] = mac_u64; + } else { + en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + priv->port, dev->dev_addr); + err = -EINVAL; + goto out; + } } memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac)); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 91f2b2c43c12..d3f508697a3d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; if (user_prio >= 0) { context->pri_path.sched_queue |= user_prio << 3; - context->pri_path.feup = 1 << 6; + context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; } context->pri_path.counter_index = 0xff; context->cqn_send = cpu_to_be32(cqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index b147bdd40768..2c97901c6a6d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [2] = "RSS XOR Hash Function support", [3] = "Device manage flow steering support", [4] = "Automatic MAC reassignment support", - [5] = "Time stamping support" + [5] = "Time stamping support", + [6] = "VST (control vlan insertion/stripping) support", + [7] = "FSM (MAC anti-spoofing) support" }; int i; @@ -838,12 +840,16 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_CMD_NATIVE); if (!err && dev->caps.function != slave) { - /* set slave default_mac address */ - MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); - def_mac += slave << 8; /* if config MAC in DB use it */ if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac) def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; + else { + /* set slave default_mac address */ + MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); + def_mac += slave << 8; + priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac; + } + MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); /* get port type - currently only eth is enabled */ diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 0d32a82458bf..2f4a26039e80 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1290,7 +1290,6 @@ static int mlx4_init_slave(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); u64 dma = (u64) priv->mfunc.vhcr_dma; - int num_of_reset_retries = NUM_OF_RESET_RETRIES; int ret_from_reset = 0; u32 slave_read; u32 cmd_channel_ver; @@ -1304,18 +1303,10 @@ static int mlx4_init_slave(struct mlx4_dev *dev) * NUM_OF_RESET_RETRIES times before leaving.*/ if (ret_from_reset) { if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { - msleep(SLEEP_TIME_IN_RESET); - while (ret_from_reset && num_of_reset_retries) { - mlx4_warn(dev, "slave is currently in the" - "middle of FLR. retrying..." - "(try num:%d)\n", - (NUM_OF_RESET_RETRIES - - num_of_reset_retries + 1)); - ret_from_reset = - mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, - 0, MLX4_COMM_TIME); - num_of_reset_retries = num_of_reset_retries - 1; - } + mlx4_warn(dev, "slave is currently in the " + "middle of FLR. 
Deferring probe.\n"); + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return -EPROBE_DEFER; } else goto err; } @@ -1526,7 +1517,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) } else { err = mlx4_init_slave(dev); if (err) { - mlx4_err(dev, "Failed to initialize slave\n"); + if (err != -EPROBE_DEFER) + mlx4_err(dev, "Failed to initialize slave\n"); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index e12e0d2e0ee0..1157f028a90f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (MLX4_QP_ST_RC == qp_type) return -EINVAL; + /* force strip vlan by clear vsd */ + qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + if (0 != vp_oper->state.default_vlan) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + } else { /* priority tagged */ + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } + + qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; qpc->pri_path.vlan_index = vp_oper->vlan_idx; - qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/ - qpc->pri_path.feup |= 1 << 3; /* set fvl bit */ + qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; qpc->pri_path.sched_queue &= 0xC7; qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; - mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n", - be32_to_cpu(qpc->local_qpn) & 0xffffff, port, - (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan, - vp_oper->vlan_idx, (int)(qpc->pri_path.feup), - (int)(qpc->pri_path.fl)); } if (vp_oper->state.spoofchk) { - qpc->pri_path.feup |= 1 << 5; /* set fsm bit */; + qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; - mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n", - be32_to_cpu(qpc->local_qpn) & 0xffffff, port, - (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc, - vp_oper->mac_idx); } return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 90c253b145ef..c1b693cb3df3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -429,6 +429,7 @@ struct qlcnic_hardware_context { u16 port_type; u16 board_type; + u16 supported_type; u16 link_speed; u16 link_duplex; @@ -906,8 +907,11 @@ struct qlcnic_ipaddr { #define QLCNIC_FW_HANG 0x4000 #define QLCNIC_FW_LRO_MSS_CAP 0x8000 #define QLCNIC_TX_INTR_SHARED 0x10000 +#define QLCNIC_APP_CHANGED_FLAGS 0x20000 #define QLCNIC_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) +#define QLCNIC_IS_TSO_CAPABLE(adapter) \ + ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) #define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 #define QLCNIC_MSIX_TBL_SPACE 8192 @@ -1033,6 +1037,7 @@ struct qlcnic_adapter { spinlock_t rx_mac_learn_lock; u32 file_prd_off; /*File fw product offset*/ u32 fw_version; + u32 offload_flags; const struct firmware *fw; }; @@ -1514,6 +1519,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); 
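/*
 * Editor's aside — not part of any patch above or below. The mlx4_init_slave()
 * hunk earlier in this listing drops an msleep()-and-retry loop in favour of
 * returning -EPROBE_DEFER, letting the driver core re-run the probe once the
 * slave has finished its FLR. Below is a minimal, hypothetical sketch of that
 * general deferred-probe pattern; the "foo" names and the clock dependency are
 * illustrative assumptions, only -EPROBE_DEFER and the probe/driver-core
 * convention come from the kernel driver model.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		/*
		 * The dependency (here a clock) is not ready yet: returning
		 * -EPROBE_DEFER asks the driver core to retry this probe
		 * later, instead of sleeping and looping inside the driver.
		 */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		return PTR_ERR(clk);
	}

	/* Dependency available: finish bring-up normally. */
	return clk_prepare_enable(clk);
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo-example",
	},
};
module_platform_driver(foo_driver);
/* End of editor's aside; the qlcnic.h hunk continues below. */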
void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); +int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); @@ -1540,6 +1546,8 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); +void qlcnic_set_netdev_features(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); void qlcnic_sriov_vf_schedule_multi(struct net_device *); void qlcnic_vf_add_mc_list(struct net_device *, u16); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index ea790a93ee7c..b4ff1e35a11d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) return 1; } -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) { u32 data; - unsigned long wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; /* wait for mailbox completion */ do { data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); - if (++wait_time > QLCNIC_MBX_TIMEOUT) { + if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) { data = QLCNIC_RCODE_TIMEOUT; break; } @@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, u16 opcode; u8 mbx_err_code; unsigned long flags; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd; struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0; opcode = LSW(cmd->req.arg[0]); if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { @@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, /* Signal FW about the impending command */ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); poll: - rsp = qlcnic_83xx_mbx_poll(adapter); + rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) - goto poll; + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); @@ -1276,11 +1273,13 @@ out: return err; } -static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) +static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, + int num_sds_ring) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_rds_ring *rds_ring; + u16 adapter_state = adapter->is_up; u8 ring; int ret; @@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) ret = qlcnic_fw_create_ctx(adapter); if (ret) { qlcnic_detach(adapter); + if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { + adapter->max_sds_rings = num_sds_ring; + qlcnic_attach(adapter); + } netif_device_attach(netdev); return ret; } @@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; - ret = 
qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, + max_sds_rings); if (ret) goto fail_diag_alloc; @@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) break; } config = cmd.rsp.arg[3]; + if (QLC_83XX_SFP_PRESENT(config)) { + switch (ahw->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ahw->supported_type = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ahw->supported_type = PORT_TP; + break; + default: + ahw->supported_type = PORT_OTHER; + } + } if (config & 1) err = 1; } @@ -2838,7 +2859,8 @@ out: return config; } -int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) +int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) { u32 config = 0; int status = 0; @@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); /* hard code until there is a way to get it from flash */ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; + + if (netif_running(adapter->netdev) && ahw->has_link_events) { + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->duplex = ahw->link_duplex; + ecmd->autoneg = ahw->link_autoneg; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->autoneg = AUTONEG_DISABLE; + } + + if (ahw->port_type == QLCNIC_XGBE) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + ecmd->advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + } + + switch (ahw->supported_type) { + case PORT_FIBRE: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case PORT_TP: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + break; + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + ecmd->transceiver = XCVR_EXTERNAL; + break; + } + ecmd->phy_address = ahw->physical_port; return status; } @@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EIO; - ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, + max_sds_rings); if (ret) goto fail_diag_irq; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 1f1d85e6f2af..f5db67fc9f55 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); -int qlcnic_83xx_get_settings(struct 
qlcnic_adapter *); +int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, struct ethtool_pauseparam *); @@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *); int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *); +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index ab1d8d99cbd5..5e7fb1dfb97b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -382,8 +382,6 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter) clear_bit(__QLCNIC_RESETTING, &adapter->state); dev_err(&adapter->pdev->dev, "%s:\n", __func__); - adapter->netdev->trans_start = jiffies; - return 0; } @@ -435,10 +433,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) } done: netif_device_attach(netdev); - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } } static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, @@ -642,15 +636,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; + qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); - clear_bit(__QLCNIC_RESETTING, &adapter->state); set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); - adapter->ahw->idc.quiesce_req = 0; - adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; - adapter->ahw->idc.err_code = 0; - adapter->ahw->idc.collect_dump = 0; + + ahw->idc.quiesce_req = 0; + ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + ahw->idc.err_code = 0; + ahw->idc.collect_dump = 0; + ahw->reset_context = 0; + adapter->tx_timeo_cnt = 0; + + clear_bit(__QLCNIC_RESETTING, &adapter->state); } /** @@ -851,6 +851,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) /* Check for soft reset request */ if (ahw->reset_context && !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { + adapter->ahw->reset_context = 0; qlcnic_83xx_idc_tx_soft_reset(adapter); return ret; } @@ -914,6 +915,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); adapter->ahw->idc.err_code = -EIO; return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 08efb4635007..f67652de5a63 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_lro_pkt_cnt", "ctx_ip_csum_error", "ctx_rx_pkts_wo_ctx", - 
"ctx_rx_pkts_dropped_wo_sts", + "ctx_rx_pkts_drop_wo_sds_on_card", + "ctx_rx_pkts_drop_wo_sds_on_host", "ctx_rx_osized_pkts", "ctx_rx_pkts_dropped_wo_rds", "ctx_rx_unexpected_mcast_pkts", "ctx_invalid_mac_address", - "ctx_rx_rds_ring_prim_attemoted", + "ctx_rx_rds_ring_prim_attempted", "ctx_rx_rds_ring_prim_success", "ctx_num_lro_flows_added", "ctx_num_lro_flows_removed", @@ -251,6 +252,18 @@ static int qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (qlcnic_82xx_check(adapter)) + return qlcnic_82xx_get_settings(adapter, ecmd); + else if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_get_settings(adapter, ecmd); + + return -EIO; +} + +int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ struct qlcnic_hardware_context *ahw = adapter->ahw; u32 speed, reg; int check_sfp_module = 0; @@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) } else if (adapter->ahw->port_type == QLCNIC_XGBE) { u32 val = 0; - if (qlcnic_83xx_check(adapter)) - qlcnic_83xx_get_settings(adapter); - else - val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); if (val == QLCNIC_PORT_MODE_802_3_AP) { ecmd->supported = SUPPORTED_1000baseT_Full; @@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ecmd->advertising = ADVERTISED_10000baseT_Full; } - if (netif_running(dev) && adapter->ahw->has_link_events) { - if (qlcnic_82xx_check(adapter)) { - reg = QLCRD32(adapter, - P3P_LINK_SPEED_REG(pcifn)); - speed = P3P_LINK_SPEED_VAL(pcifn, reg); - ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; - } - ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); - ecmd->autoneg = adapter->ahw->link_autoneg; - ecmd->duplex = adapter->ahw->link_duplex; + if (netif_running(adapter->netdev) && ahw->has_link_events) { + reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); + speed = P3P_LINK_SPEED_VAL(pcifn, reg); + ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->autoneg = ahw->link_autoneg; + ecmd->duplex = ahw->link_duplex; goto skip; } @@ -340,8 +347,8 @@ skip: case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: ecmd->advertising |= ADVERTISED_TP; ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(dev) && - adapter->ahw->has_link_events; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; case QLCNIC_BRDTYPE_P3P_10G_XFP: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; @@ -355,8 +362,8 @@ skip: ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && - adapter->ahw->has_link_events; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; } else { ecmd->autoneg = AUTONEG_ENABLE; ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); @@ -365,13 +372,6 @@ skip: ecmd->port = PORT_TP; } break; - case QLCNIC_BRDTYPE_83XX_10G: - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && ahw->has_link_events; - break; default: dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", adapter->ahw->board_type); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 6a6512ba9f38..106a12f2a02f 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -973,16 +973,57 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) return rc; } +static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter, + netdev_features_t features) +{ + u32 offload_flags = adapter->offload_flags; + + if (offload_flags & BIT_0) { + features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + adapter->rx_csum = 1; + if (QLCNIC_IS_TSO_CAPABLE(adapter)) { + if (!(offload_flags & BIT_1)) + features &= ~NETIF_F_TSO; + else + features |= NETIF_F_TSO; + + if (!(offload_flags & BIT_2)) + features &= ~NETIF_F_TSO6; + else + features |= NETIF_F_TSO6; + } + } else { + features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + + if (QLCNIC_IS_TSO_CAPABLE(adapter)) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + adapter->rx_csum = 0; + } + + return features; +} netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed; - if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) && - qlcnic_82xx_check(adapter)) { - netdev_features_t changed = features ^ netdev->features; - features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + if (qlcnic_82xx_check(adapter) && + (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) { + features = qlcnic_process_flags(adapter, features); + } else { + changed = features ^ netdev->features; + features ^= changed & (NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6); + } } if (!(features & NETIF_F_RXCSUM)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 95b1b5732838..b6818f4356b9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata { #define QLCNIC_SET_OWNER 1 #define QLCNIC_CLR_OWNER 0 -#define QLCNIC_MBX_TIMEOUT 10000 +#define QLCNIC_MBX_TIMEOUT 5000 #define QLCNIC_MBX_RSP_OK 1 #define QLCNIC_MBX_PORT_RSP_OK 0x1a diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 264d5a4f8153..aeb26a850679 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); int qlcnic_use_msi = 1; -MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); +MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); module_param_named(use_msi, qlcnic_use_msi, int, 0444); int qlcnic_use_msi_x = 1; -MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); +MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); int qlcnic_auto_fw_reset = 1; -MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); +MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); int qlcnic_load_fw_file; -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); module_param_named(load_fw_file, 
qlcnic_load_fw_file, int, 0444); int qlcnic_config_npars; module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); +MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void qlcnic_remove(struct pci_dev *pdev); @@ -84,14 +84,9 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *); static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); static int qlcnicvf_start_firmware(struct qlcnic_adapter *); -static void qlcnic_set_netdev_features(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16); static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16); -#define QLCNIC_IS_TSO_CAPABLE(adapter) \ - ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) - static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -308,6 +303,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) return 0; } +static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mac_list_s *cur; + struct list_head *head; + + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_list_s, list); + if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) { + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + 0, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + return; + } + } +} + static int qlcnic_set_mac(struct net_device *netdev, void *p) { struct qlcnic_adapter *adapter = netdev_priv(netdev); @@ -322,11 +334,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; + if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN)) + return 0; + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_detach(netdev); qlcnic_napi_disable(adapter); } + qlcnic_delete_adapter_mac(adapter); memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); qlcnic_set_multi(adapter->netdev); @@ -1053,8 +1069,6 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, if (!esw_cfg->promisc_mode) adapter->flags |= QLCNIC_PROMISC_DISABLED; - - qlcnic_set_netdev_features(adapter, esw_cfg); } int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) @@ -1069,51 +1083,23 @@ int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) return -EIO; qlcnic_set_vlan_config(adapter, &esw_cfg); qlcnic_set_eswitch_port_features(adapter, &esw_cfg); + qlcnic_set_netdev_features(adapter, &esw_cfg); return 0; } -static void -qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) +void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) { struct net_device *netdev = adapter->netdev; - unsigned long features, vlan_features; if (qlcnic_83xx_check(adapter)) return; - features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_IPV6_CSUM | NETIF_F_GRO); - vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM); - - if (QLCNIC_IS_TSO_CAPABLE(adapter)) { - features |= (NETIF_F_TSO | NETIF_F_TSO6); - vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); - } - - if (netdev->features & NETIF_F_LRO) - features |= NETIF_F_LRO; - - if 
(esw_cfg->offload_flags & BIT_0) { - netdev->features |= features; - adapter->rx_csum = 1; - if (!(esw_cfg->offload_flags & BIT_1)) { - netdev->features &= ~NETIF_F_TSO; - features &= ~NETIF_F_TSO; - } - if (!(esw_cfg->offload_flags & BIT_2)) { - netdev->features &= ~NETIF_F_TSO6; - features &= ~NETIF_F_TSO6; - } - } else { - netdev->features &= ~features; - features &= ~features; - adapter->rx_csum = 0; - } - - netdev->vlan_features = (features & vlan_features); + adapter->offload_flags = esw_cfg->offload_flags; + adapter->flags |= QLCNIC_APP_CHANGED_FLAGS; + netdev_update_features(netdev); + adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS; } static int @@ -1995,8 +1981,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_enable_pcie_error_reporting(pdev); ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL); - if (!ahw) + if (!ahw) { + err = -ENOMEM; goto err_out_free_res; + } switch (ent->device) { case PCI_DEVICE_ID_QLOGIC_QLE824X: @@ -2032,6 +2020,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic"); if (adapter->qlcnic_wq == NULL) { + err = -ENOMEM; dev_err(&pdev->dev, "Failed to create workqueue\n"); goto err_out_free_netdev; } @@ -2112,6 +2101,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_msi; } + err = qlcnic_get_act_pci_func(adapter); + if (err) + goto err_out_disable_mbx_intr; + err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); if (err) goto err_out_disable_mbx_intr; @@ -2141,9 +2134,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - if (qlcnic_get_act_pci_func(adapter)) - goto err_out_disable_mbx_intr; - if (adapter->drv_mac_learn) qlcnic_alloc_lb_filters_mem(adapter); @@ -2481,12 +2471,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev) if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return; - dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - - if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) - adapter->need_fw_reset = 1; - else + if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) { + netdev_info(netdev, "Tx timeout, reset the adapter.\n"); + if (qlcnic_82xx_check(adapter)) + adapter->need_fw_reset = 1; + else if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_idc_request_reset(adapter, + QLCNIC_FORCE_FW_DUMP_KEY); + } else { + netdev_info(netdev, "Tx timeout, reset adapter context.\n"); adapter->ahw->reset_context = 1; + } } static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) @@ -3123,10 +3118,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) if (adapter->need_fw_reset) goto detach; - if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) { + if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) qlcnic_reset_hw_context(adapter); - adapter->netdev->trans_start = jiffies; - } return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 44d547d78b84..196b2d100407 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u8 pci_func, u8 size) { + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; unsigned long flags; - u32 rsp, 
mbx_val, fw_data, rsp_num, mbx_cmd, val; u16 opcode; u8 mbx_err_code; int i, j; @@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, * assume something is wrong. */ poll: - rsp = qlcnic_83xx_mbx_poll(adapter); + rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) - goto poll; + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); @@ -1736,7 +1734,6 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) if (!qlcnic_sriov_vf_reinit_driver(adapter)) { qlcnic_sriov_vf_attach(adapter); - adapter->netdev->trans_start = jiffies; adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; adapter->fw_fail_cnt = 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index c81be2da119b..1a66ccded235 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; - if (!(cmd->req.arg[1] & BIT_8)) - return -EINVAL; - return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 4e22e794a186..e7a2fe21b649 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -544,6 +544,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); + rtnl_lock(); + qlcnic_set_netdev_features(adapter, &esw_cfg[i]); + rtnl_unlock(); break; case QLCNIC_ADD_VLAN: qlcnic_set_vlan_config(adapter, &esw_cfg[i]); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 87463bc701a6..f87cc216045b 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, if (pci_dma_mapping_error(qdev->pdev, map)) { __free_pages(rx_ring->pg_chunk.page, qdev->lbq_buf_order); + rx_ring->pg_chunk.page = NULL; netif_err(qdev, drv, qdev->ndev, "PCI mapping failed.\n"); return -ENOMEM; @@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring curr_idx = 0; } + if (rx_ring->pg_chunk.page) { + pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); + put_page(rx_ring->pg_chunk.page); + rx_ring->pg_chunk.page = NULL; + } } static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) @@ -4710,6 +4717,7 @@ static int qlge_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "net device registration failed.\n"); ql_release_all(pdev); pci_disable_device(pdev); + free_netdev(ndev); return err; } /* Start up the timer to trigger EEH if diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 7d1fb9ad1296..03523459c406 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1136,6 +1136,7 @@ static 
void cp_clean_rings (struct cp_private *cp) cp->dev->stats.tx_dropped++; } } + netdev_reset_queue(cp->dev); memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 79c520b64fdd..393f961a013c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5856,7 +5856,20 @@ err_out: return -EIO; } -static inline void rtl8169_tso_csum(struct rtl8169_private *tp, +static bool rtl_skb_pad(struct sk_buff *skb) +{ + if (skb_padto(skb, ETH_ZLEN)) + return false; + skb_put(skb, ETH_ZLEN - skb->len); + return true; +} + +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +{ + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; +} + +static inline bool rtl8169_tso_csum(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; @@ -5869,13 +5882,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp, } else if (skb->ip_summed == CHECKSUM_PARTIAL) { const struct iphdr *ip = ip_hdr(skb); + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); + if (ip->protocol == IPPROTO_TCP) opts[offset] |= info->checksum.tcp; else if (ip->protocol == IPPROTO_UDP) opts[offset] |= info->checksum.udp; else WARN_ON_ONCE(1); + } else { + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return rtl_skb_pad(skb); } + return true; } static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, @@ -5896,17 +5916,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, goto err_stop_0; } - /* 8168evl does not automatically pad to minimum length. 
*/ - if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 && - skb->len < ETH_ZLEN)) { - if (skb_padto(skb, ETH_ZLEN)) - goto err_update_stats; - skb_put(skb, ETH_ZLEN - skb->len); - } - if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) goto err_stop_0; + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + if (!rtl8169_tso_csum(tp, skb, opts)) + goto err_update_stats; + len = skb_headlen(skb); mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { @@ -5918,11 +5936,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->tx_skb[entry].len = len; txd->addr = cpu_to_le64(mapping); - opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); - opts[0] = DescOwn; - - rtl8169_tso_csum(tp, skb, opts); - frags = rtl8169_xmit_frags(tp, skb, opts); if (frags < 0) goto err_dma_1; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 33dc6f2418f2..b4479b5aaee4 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -897,8 +897,8 @@ static int sh_eth_check_reset(struct net_device *ndev) mdelay(1); cnt--; } - if (cnt < 0) { - pr_err("Device reset fail\n"); + if (cnt <= 0) { + pr_err("Device reset failed\n"); ret = -ETIMEDOUT; } return ret; @@ -2745,11 +2745,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (mdp->cd->tsu) { struct resource *rtsu; rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!rtsu) { - dev_err(&pdev->dev, "Not found TSU resource\n"); - ret = -ENODEV; - goto out_release; - } mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); if (IS_ERR(mdp->tsu_addr)) { ret = PTR_ERR(mdp->tsu_addr); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 01b99206139a..39e4cb39de29 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -638,14 +638,16 @@ static void efx_start_datapath(struct efx_nic *efx) EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) + - EFX_PAGE_IP_ALIGN + efx->rx_dma_len); + NET_IP_ALIGN + efx->rx_dma_len); if (rx_buf_len <= PAGE_SIZE) { efx->rx_scatter = false; efx->rx_buffer_order = 0; } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + - EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE > - PAGE_SIZE / 2); + 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, + EFX_RX_BUF_ALIGNMENT) > + PAGE_SIZE); efx->rx_scatter = true; efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; efx->rx_buffer_order = 0; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9bd433a095c5..39d6bd77f015 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -72,8 +72,20 @@ /* Maximum possible MTU the driver supports */ #define EFX_MAX_MTU (9 * 1024) -/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */ -#define EFX_RX_USR_BUF_SIZE 1824 +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, + * and should be a multiple of the cache line size. + */ +#define EFX_RX_USR_BUF_SIZE (2048 - 256) + +/* If possible, we should ensure cache line alignment at start and end + * of every buffer. Otherwise, we just need to ensure 4-byte + * alignment of the network header. 
+ */ +#if NET_IP_ALIGN == 0 +#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES +#else +#define EFX_RX_BUF_ALIGNMENT 4 +#endif /* Forward declare Precision Time Protocol (PTP) support structure. */ struct efx_ptp_data; @@ -468,24 +480,11 @@ enum nic_state { }; /* - * Alignment of page-allocated RX buffers - * - * Controls the number of bytes inserted at the start of an RX buffer. - * This is the equivalent of NET_IP_ALIGN [which controls the alignment - * of the skb->head for hardware DMA]. - */ -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#define EFX_PAGE_IP_ALIGN 0 -#else -#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN -#endif - -/* * Alignment of the skb->head which wraps a page-allocated RX buffer * * The skb allocated to wrap an rx_buffer can have this alignment. Since * the data is memcpy'd from the rx_buf, it does not need to be equal to - * EFX_PAGE_IP_ALIGN. + * NET_IP_ALIGN. */ #define EFX_PAGE_SKB_ALIGN 2 diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index e73e30bac10e..a7dfe36cabf4 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -93,8 +93,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN, - L1_CACHE_BYTES); + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, + EFX_RX_BUF_ALIGNMENT); efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / efx->rx_page_buf_step); @@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) do { index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; + rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; rx_buf->page = page; - rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; + rx_buf->page_offset = page_offset + NET_IP_ALIGN; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index f695a50bac47..43c1f3223322 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,6 +1,6 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA select NET_CORE select MII select PHYLIB diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 12aec173564c..b2275d1b19b3 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -449,10 +449,9 @@ static int davinci_mdio_suspend(struct device *dev) __raw_writel(ctrl, &data->regs->control); wait_for_idle(data); - pm_runtime_put_sync(data->dev); - data->suspended = true; spin_unlock(&data->lock); + pm_runtime_put_sync(data->dev); return 0; } @@ -462,9 +461,9 @@ static int davinci_mdio_resume(struct device *dev) struct davinci_mdio_data *data = dev_get_drvdata(dev); u32 ctrl; - spin_lock(&data->lock); pm_runtime_get_sync(data->dev); + spin_lock(&data->lock); /* restart the scan state machine */ ctrl = __raw_readl(&data->regs->control); ctrl |= CONTROL_ENABLE; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 919b983114e9..b7268b3dae77 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -946,7 +946,8 @@ static int 
xemaclite_open(struct net_device *dev) phy_write(lp->phy_dev, MII_CTRL1000, 0); /* Advertise only 10 and 100mbps full/half duplex speeds */ - phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL); + phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_CSMA); /* Restart auto negotiation */ bmcr = phy_read(lp->phy_dev, MII_BMCR); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 088c55496191..ab2307b5d9a7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -31,6 +31,7 @@ #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> +#include <linux/if_vlan.h> #include <linux/in.h> #include <linux/slab.h> #include <net/arp.h> @@ -284,7 +285,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb->protocol = eth_type_trans(skb, net); skb->ip_summed = CHECKSUM_NONE; - skb->vlan_tci = packet->vlan_tci; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci); net->stats.rx_packets++; net->stats.rx_bytes += packet->total_data_buflen; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d5a141c7c4e7..1c502bb0c916 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) } if (port->passthru) - vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); + vlan = list_first_or_null_rcu(&port->vlans, + struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) @@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (err < 0) goto upper_dev_unlink; - list_add_tail(&vlan->list, &port->vlans); + list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; @@ -842,7 +843,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); - list_del(&vlan->list); + list_del_rcu(&vlan->list); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); } diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index ed947dd76fbd..f3cdf64997d6 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev) if (dev == NULL) return; + list_del(&dev->list); + ndev = dev->ndev; unregister_netdev(ndev); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c14f14741b3f..38f0b312ff85 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1044,7 +1044,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); idx = phy_find_setting(phydev->speed, phydev->duplex); - if ((lp & adv & settings[idx].setting)) + if (!(lp & adv & settings[idx].setting)) goto eee_exit; if (clk_stop_enable) { diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 7c43261975bd..b3051052f3ad 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1092,8 +1092,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev) } port->index = -1; - team_port_enable(team, port); list_add_tail_rcu(&port->list, &team->port_list); + team_port_enable(team, port); __team_compute_features(team); __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); __team_options_change_check(team); @@ -2374,7 +2374,8 @@ static int team_nl_send_port_list_get(struct team *team, u32 
portid, u32 seq, bool incomplete; int i; - port = list_first_entry(&team->port_list, struct team_port, list); + port = list_first_entry_or_null(&team->port_list, + struct team_port, list); start_again: err = __send_and_alloc_skb(&skb, team, portid, send_func); @@ -2402,8 +2403,8 @@ start_again: err = team_nl_fill_one_port_get(skb, one_port); if (err) goto errout; - } else { - list_for_each_entry(port, &team->port_list, list) { + } else if (port) { + list_for_each_entry_from(port, &team->port_list, list) { err = team_nl_fill_one_port_get(skb, port); if (err) { if (err == -EMSGSIZE) { diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c index 5ca14d463ba7..7f032e211343 100644 --- a/drivers/net/team/team_mode_random.c +++ b/drivers/net/team/team_mode_random.c @@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb) port_index = random_N(team->en_port_count); port = team_get_port_by_index_rcu(team, port_index); + if (unlikely(!port)) + goto drop; port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index d268e4de781b..472623f8ce3d 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -32,6 +32,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb) port_index = rr_priv(team)->sent_packets++ % team->en_port_count; port = team_get_port_by_index_rcu(team, port_index); + if (unlikely(!port)) + goto drop; port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index f042b0373e5d..bfa9bb48e42d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) u32 numqueues = 0; rcu_read_lock(); - numqueues = tun->numqueues; + numqueues = ACCESS_ONCE(tun->numqueues); txq = skb_get_rxhash(skb); if (txq) { @@ -1585,6 +1585,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) else return -EINVAL; + if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != + !!(tun->flags & TUN_TAP_MQ)) + return -EINVAL; + if (tun_not_capable(tun)) return -EPERM; err = security_tun_dev_open(tun->security); @@ -2155,6 +2159,8 @@ static int tun_chr_open(struct inode *inode, struct file * file) set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); INIT_LIST_HEAD(&tfile->next); + sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); + return 0; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 078795fe6e31..04ee044dde51 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -627,6 +627,12 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, +/* Huawei E1820 - handled by qmi_wwan */ +{ + USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1), + .driver_info = 0, +}, + /* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */ #if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE) { diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index cf887c2384e9..d095d0d3056b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -519,6 +519,7 @@ static const struct usb_device_id products[] = { /* 3. 
Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ + {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, @@ -582,6 +583,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ + {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index a491d3a95393..6cbdac67f3a0 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -130,19 +130,23 @@ struct rtl8150 { struct usb_device *udev; struct tasklet_struct tl; struct net_device *netdev; - struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb; + struct urb *rx_urb, *tx_urb, *intr_urb; struct sk_buff *tx_skb, *rx_skb; struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE]; spinlock_t rx_pool_lock; struct usb_ctrlrequest dr; int intr_interval; - __le16 rx_creg; u8 *intr_buff; u8 phy; }; typedef struct rtl8150 rtl8150_t; +struct async_req { + struct usb_ctrlrequest dr; + u16 rx_creg; +}; + static const char driver_name [] = "rtl8150"; /* @@ -164,51 +168,47 @@ static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) indx, 0, data, size, 500); } -static void ctrl_callback(struct urb *urb) +static void async_set_reg_cb(struct urb *urb) { - rtl8150_t *dev; + struct async_req *req = (struct async_req *)urb->context; int status = urb->status; - switch (status) { - case 0: - break; - case -EINPROGRESS: - break; - case -ENOENT: - break; - default: - if (printk_ratelimit()) - dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status); - } - dev = urb->context; - clear_bit(RX_REG_SET, &dev->flags); + if (status < 0) + dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status); + kfree(req); + usb_free_urb(urb); } -static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size) +static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg) { - int ret; - - if (test_bit(RX_REG_SET, &dev->flags)) - return -EAGAIN; + int res = -ENOMEM; + struct urb *async_urb; + struct async_req *req; - dev->dr.bRequestType = RTL8150_REQT_WRITE; - dev->dr.bRequest = RTL8150_REQ_SET_REGS; - dev->dr.wValue = cpu_to_le16(indx); - dev->dr.wIndex = 0; - dev->dr.wLength = cpu_to_le16(size); - dev->ctrl_urb->transfer_buffer_length = size; - usb_fill_control_urb(dev->ctrl_urb, dev->udev, - usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr, - &dev->rx_creg, size, ctrl_callback, dev); - if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) { - if (ret == -ENODEV) + req = kmalloc(sizeof(struct async_req), GFP_ATOMIC); + if (req == NULL) + return res; + async_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (async_urb == NULL) { + kfree(req); + return res; + } + req->rx_creg = cpu_to_le16(reg); + req->dr.bRequestType = RTL8150_REQT_WRITE; + req->dr.bRequest = RTL8150_REQ_SET_REGS; + req->dr.wIndex = 0; + req->dr.wValue = cpu_to_le16(indx); + req->dr.wLength = cpu_to_le16(size); + usb_fill_control_urb(async_urb, dev->udev, + usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr, + &req->rx_creg, size, async_set_reg_cb, req); + res = 
usb_submit_urb(async_urb, GFP_ATOMIC); + if (res) { + if (res == -ENODEV) netif_device_detach(dev->netdev); - dev_err(&dev->udev->dev, - "control request submission failed: %d\n", ret); - } else - set_bit(RX_REG_SET, &dev->flags); - - return ret; + dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res); + } + return res; } static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) @@ -330,13 +330,6 @@ static int alloc_all_urbs(rtl8150_t * dev) usb_free_urb(dev->tx_urb); return 0; } - dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!dev->ctrl_urb) { - usb_free_urb(dev->rx_urb); - usb_free_urb(dev->tx_urb); - usb_free_urb(dev->intr_urb); - return 0; - } return 1; } @@ -346,7 +339,6 @@ static void free_all_urbs(rtl8150_t * dev) usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); usb_free_urb(dev->intr_urb); - usb_free_urb(dev->ctrl_urb); } static void unlink_all_urbs(rtl8150_t * dev) @@ -354,7 +346,6 @@ static void unlink_all_urbs(rtl8150_t * dev) usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->tx_urb); usb_kill_urb(dev->intr_urb); - usb_kill_urb(dev->ctrl_urb); } static inline struct sk_buff *pull_skb(rtl8150_t *dev) @@ -629,7 +620,6 @@ static int enable_net_traffic(rtl8150_t * dev) } /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */ rcr = 0x9e; - dev->rx_creg = cpu_to_le16(rcr); tcr = 0xd8; cr = 0x0c; if (!(rcr & 0x80)) @@ -662,20 +652,22 @@ static void rtl8150_tx_timeout(struct net_device *netdev) static void rtl8150_set_multicast(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); + u16 rx_creg = 0x9e; + netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { - dev->rx_creg |= cpu_to_le16(0x0001); + rx_creg |= 0x0001; dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { - dev->rx_creg &= cpu_to_le16(0xfffe); - dev->rx_creg |= cpu_to_le16(0x0002); + rx_creg &= 0xfffe; + rx_creg |= 0x0002; dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ - dev->rx_creg &= cpu_to_le16(0x00fc); + rx_creg &= 0x00fc; } - async_set_registers(dev, RCR, 2); + async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg); netif_wake_queue(netdev); } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f95cb032394b..06ee82f557d4 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1477,7 +1477,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) /* usbnet already took usb runtime pm, so have to enable the feature * for usb interface, otherwise usb_autopm_get_interface may return - * failure if USB_SUSPEND(RUNTIME_PM) is enabled. + * failure if RUNTIME_PM is enabled. */ if (!driver->supports_autosuspend) { driver->supports_autosuspend = 1; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3c23fdc27bf0..c9e00387d999 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -28,7 +28,7 @@ #include <linux/slab.h> #include <linux/cpu.h> -static int napi_weight = 128; +static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; @@ -636,10 +636,11 @@ static int virtnet_open(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i; - for (i = 0; i < vi->curr_queue_pairs; i++) { - /* Make sure we have some buffers: if oom use wq. 
*/ - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); + for (i = 0; i < vi->max_queue_pairs; i++) { + if (i < vi->curr_queue_pairs) + /* Make sure we have some buffers: if oom use wq. */ + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ba81f3c39a83..3b1d2ee7156b 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -301,7 +301,7 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, } /* Look up Ethernet address in forwarding table */ -static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, +static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, const u8 *mac) { @@ -316,6 +316,18 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, return NULL; } +static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, + const u8 *mac) +{ + struct vxlan_fdb *f; + + f = __vxlan_find_mac(vxlan, mac); + if (f) + f->used = jiffies; + + return f; +} + /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, __be32 ip, __be16 port, __u32 vni, __u32 ifindex) @@ -353,7 +365,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, struct vxlan_fdb *f; int notify = 0; - f = vxlan_find_mac(vxlan, mac); + f = __vxlan_find_mac(vxlan, mac); if (f) { if (flags & NLM_F_EXCL) { netdev_dbg(vxlan->dev, @@ -563,7 +575,6 @@ static void vxlan_snoop(struct net_device *dev, f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { - f->used = jiffies; if (likely(f->remote.remote_ip == src_ip)) return; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 9b20d9ee2719..7f702fe3ecc2 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -2369,6 +2369,9 @@ ath5k_tx_complete_poll_work(struct work_struct *work) int i; bool needreset = false; + if (!test_bit(ATH_STAT_STARTED, ah->status)) + return; + mutex_lock(&ah->lock); for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { @@ -2676,6 +2679,7 @@ done: mmiowb(); mutex_unlock(&ah->lock); + set_bit(ATH_STAT_STARTED, ah->status); ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); @@ -2737,6 +2741,7 @@ void ath5k_stop(struct ieee80211_hw *hw) ath5k_stop_tasklets(ah); + clear_bit(ATH_STAT_STARTED, ah->status); cancel_delayed_work_sync(&ah->tx_complete_work); if (!ath5k_modparam_no_hw_rfkill_switch) diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 17507dc8a1e7..3c2cbc9d6295 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT config ATH9K tristate "Atheros 802.11n wireless cards support" - depends on MAC80211 + depends on MAC80211 && HAS_DMA select ATH9K_HW select MAC80211_LEDS select LEDS_CLASS @@ -92,13 +92,17 @@ config ATH9K_MAC_DEBUG This option enables collection of statistics for Rx/Tx status data and some other MAC related statistics -config ATH9K_RATE_CONTROL +config ATH9K_LEGACY_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K - default y + default n ---help--- Say Y, if you want to use the ath9k specific rate control - module instead of minstrel_ht. + module instead of minstrel_ht. Be warned that there are various + issues with the ath9k RC and minstrel is a more robust algorithm. 
+ Note that even if this option is selected, "ath9k_rate_control" + has to be passed to mac80211 using the module parameter, + ieee80211_default_rc_algo. config ATH9K_HTC tristate "Atheros HTC based wireless cards support" diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 2ad8f9474ba1..75ee9e7704ce 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -8,7 +8,7 @@ ath9k-y += beacon.o \ antenna.o ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o -ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o +ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o ath9k-$(CONFIG_ATH9K_PCI) += pci.o ath9k-$(CONFIG_ATH9K_AHB) += ahb.o ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index db5ffada2217..7546b9a7dcbf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = { {0x0000a074, 0x00000000}, {0x0000a078, 0x00000000}, {0x0000a07c, 0x00000000}, - {0x0000a080, 0x1a1a1a1a}, - {0x0000a084, 0x1a1a1a1a}, - {0x0000a088, 0x1a1a1a1a}, - {0x0000a08c, 0x1a1a1a1a}, - {0x0000a090, 0x171a1a1a}, + {0x0000a080, 0x22222229}, + {0x0000a084, 0x1d1d1d1d}, + {0x0000a088, 0x1d1d1d1d}, + {0x0000a08c, 0x1d1d1d1d}, + {0x0000a090, 0x171d1d1d}, {0x0000a094, 0x11111717}, {0x0000a098, 0x00030311}, {0x0000a09c, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 639ba7d18ea4..6988e1d081f2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -965,7 +965,7 @@ static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah, { int i; - if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah)) + if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah)) return; for (i = 0; i < AR9300_MAX_CHAINS; i++) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 54ba42f4108a..874f6570bd1c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -68,13 +68,16 @@ #define AR9300_BASE_ADDR 0x3ff #define AR9300_BASE_ADDR_512 0x1ff -#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000) -#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18) +#define AR9300_OTP_BASE \ + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) +#define AR9300_OTP_STATUS \ + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 -#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c) +#define AR9300_OTP_READ_DATA \ + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 
0x3001c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 2bf6548dd143..e1714d7c9eeb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -334,7 +334,8 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1); - if (REG_READ_FIELD(ah, AR_PHY_MODE, + if (!AR_SREV_9340(ah) && + REG_READ_FIELD(ah, AR_PHY_MODE, AR_PHY_MODE_DYNAMIC) == 0x1) REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1); diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 712f415b8c08..88ff1d7b53ab 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -1020,7 +1020,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h index 0c2ac0c6dc89..e85a8b076c22 100644 --- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h @@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = { {0x00009d10, 0x01834061}, {0x00009d14, 0x00c00400}, {0x00009d18, 0x00000000}, - {0x00009e08, 0x0078230c}, - {0x00009e24, 0x990bb515}, - {0x00009e28, 0x126f0000}, + {0x00009e08, 0x0038230c}, + {0x00009e24, 0x9907b515}, + {0x00009e28, 0x126f0600}, {0x00009e30, 0x06336f77}, {0x00009e34, 0x6af6532f}, {0x00009e38, 0x0cc80c00}, @@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = { static const u32 ar9565_1p0_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d}, + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009}, {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81}, @@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = { {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, - {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, - {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, - {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, + {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, + {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, @@ -450,6 +450,8 @@ static const u32 
ar9565_1p0_soc_postamble[][5] = { static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { /* Addr allmodes */ + {0x00004050, 0x00300300}, + {0x0000406c, 0x00100000}, {0x0000a000, 0x00010000}, {0x0000a004, 0x00030002}, {0x0000a008, 0x00050004}, @@ -498,27 +500,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a0b4, 0x00000000}, {0x0000a0b8, 0x00000000}, {0x0000a0bc, 0x00000000}, - {0x0000a0c0, 0x001f0000}, - {0x0000a0c4, 0x01000101}, - {0x0000a0c8, 0x011e011f}, - {0x0000a0cc, 0x011c011d}, - {0x0000a0d0, 0x02030204}, - {0x0000a0d4, 0x02010202}, - {0x0000a0d8, 0x021f0200}, - {0x0000a0dc, 0x0302021e}, - {0x0000a0e0, 0x03000301}, - {0x0000a0e4, 0x031e031f}, - {0x0000a0e8, 0x0402031d}, - {0x0000a0ec, 0x04000401}, - {0x0000a0f0, 0x041e041f}, - {0x0000a0f4, 0x0502041d}, - {0x0000a0f8, 0x05000501}, - {0x0000a0fc, 0x051e051f}, - {0x0000a100, 0x06010602}, - {0x0000a104, 0x061f0600}, - {0x0000a108, 0x061d061e}, - {0x0000a10c, 0x07020703}, - {0x0000a110, 0x07000701}, + {0x0000a0c0, 0x00bf00a0}, + {0x0000a0c4, 0x11a011a1}, + {0x0000a0c8, 0x11be11bf}, + {0x0000a0cc, 0x11bc11bd}, + {0x0000a0d0, 0x22632264}, + {0x0000a0d4, 0x22612262}, + {0x0000a0d8, 0x227f2260}, + {0x0000a0dc, 0x4322227e}, + {0x0000a0e0, 0x43204321}, + {0x0000a0e4, 0x433e433f}, + {0x0000a0e8, 0x4462433d}, + {0x0000a0ec, 0x44604461}, + {0x0000a0f0, 0x447e447f}, + {0x0000a0f4, 0x5582447d}, + {0x0000a0f8, 0x55805581}, + {0x0000a0fc, 0x559e559f}, + {0x0000a100, 0x66816682}, + {0x0000a104, 0x669f6680}, + {0x0000a108, 0x669d669e}, + {0x0000a10c, 0x77627763}, + {0x0000a110, 0x77607761}, {0x0000a114, 0x00000000}, {0x0000a118, 0x00000000}, {0x0000a11c, 0x00000000}, @@ -530,27 +532,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a134, 0x00000000}, {0x0000a138, 0x00000000}, {0x0000a13c, 0x00000000}, - {0x0000a140, 0x001f0000}, - {0x0000a144, 0x01000101}, - {0x0000a148, 0x011e011f}, - {0x0000a14c, 0x011c011d}, - {0x0000a150, 0x02030204}, - {0x0000a154, 0x02010202}, - {0x0000a158, 0x021f0200}, - {0x0000a15c, 0x0302021e}, - {0x0000a160, 0x03000301}, - {0x0000a164, 0x031e031f}, - {0x0000a168, 0x0402031d}, - {0x0000a16c, 0x04000401}, - {0x0000a170, 0x041e041f}, - {0x0000a174, 0x0502041d}, - {0x0000a178, 0x05000501}, - {0x0000a17c, 0x051e051f}, - {0x0000a180, 0x06010602}, - {0x0000a184, 0x061f0600}, - {0x0000a188, 0x061d061e}, - {0x0000a18c, 0x07020703}, - {0x0000a190, 0x07000701}, + {0x0000a140, 0x00bf00a0}, + {0x0000a144, 0x11a011a1}, + {0x0000a148, 0x11be11bf}, + {0x0000a14c, 0x11bc11bd}, + {0x0000a150, 0x22632264}, + {0x0000a154, 0x22612262}, + {0x0000a158, 0x227f2260}, + {0x0000a15c, 0x4322227e}, + {0x0000a160, 0x43204321}, + {0x0000a164, 0x433e433f}, + {0x0000a168, 0x4462433d}, + {0x0000a16c, 0x44604461}, + {0x0000a170, 0x447e447f}, + {0x0000a174, 0x5582447d}, + {0x0000a178, 0x55805581}, + {0x0000a17c, 0x559e559f}, + {0x0000a180, 0x66816682}, + {0x0000a184, 0x669f6680}, + {0x0000a188, 0x669d669e}, + {0x0000a18c, 0x77e677e7}, + {0x0000a190, 0x77e477e5}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -770,7 +772,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = { static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { /* Addr allmodes */ - {0x00018c00, 0x18213ede}, + {0x00018c00, 0x18212ede}, {0x00018c04, 0x000801d8}, {0x00018c08, 0x0003780c}, }; @@ -889,8 +891,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = { {0x0000a180, 0x66816682}, {0x0000a184, 0x669f6680}, {0x0000a188, 0x669d669e}, - {0x0000a18c, 0x77627763}, - 
{0x0000a190, 0x77607761}, + {0x0000a18c, 0x77e677e7}, + {0x0000a190, 0x77e477e5}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -1114,7 +1116,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, @@ -1140,13 +1142,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, - {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0}, + {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1}, + {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2}, + {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3}, + {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4}, + {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, + {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, @@ -1174,7 +1176,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, @@ -1200,13 +1202,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, - {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0}, + {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1}, 
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2}, + {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3}, + {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4}, + {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, + {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 8a1888d02070..42b03dc39d14 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -251,9 +251,9 @@ struct ath_atx_tid { int tidno; int baw_head; /* first un-acked tx buffer */ int baw_tail; /* next unused tx buffer slot */ - int sched; - int paused; - u8 state; + bool sched; + bool paused; + bool active; }; struct ath_node { @@ -274,10 +274,6 @@ struct ath_node { #endif }; -#define AGGR_CLEANUP BIT(1) -#define AGGR_ADDBA_COMPLETE BIT(2) -#define AGGR_ADDBA_PROGRESS BIT(3) - struct ath_tx_control { struct ath_txq *txq; struct ath_node *an; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index e6307b86363a..b37eb8d38811 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -2008,6 +2008,14 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw, WARN_ON(i != ATH9K_SSTATS_LEN); } +void ath9k_deinit_debug(struct ath_softc *sc) +{ + if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { + relay_close(sc->rfs_chan_spec_scan); + sc->rfs_chan_spec_scan = NULL; + } +} + int ath9k_init_debug(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 794a7ec83a24..9d49aab8b989 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -304,6 +304,7 @@ struct ath9k_debug { }; int ath9k_init_debug(struct ath_hw *ah); +void ath9k_deinit_debug(struct ath_softc *sc); void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, @@ -339,6 +340,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah) return 0; } +static inline void ath9k_deinit_debug(struct ath_softc *sc) +{ +} + static inline void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) { diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 7f25da8444fe..15dfefcf2d0f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1172,6 +1172,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan) static inline void ath9k_hw_set_dma(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); + int txbuf_size; ENABLE_REGWRITE_BUFFER(ah); @@ -1225,13 +1226,17 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah) * So set the usable tx buf size also to half to * avoid data/delimiter underruns */ - REG_WRITE(ah, AR_PCU_TXBUF_CTRL, - AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE); - } else if (!AR_SREV_9271(ah)) { - REG_WRITE(ah, AR_PCU_TXBUF_CTRL, - AR_PCU_TXBUF_CTRL_USABLE_SIZE); + txbuf_size = AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE; + } else if (AR_SREV_9340_13_OR_LATER(ah)) { + /* Uses fewer entries for AR934x v1.3+ to prevent rx overruns */ + txbuf_size = 
AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE; + } else { + txbuf_size = AR_PCU_TXBUF_CTRL_USABLE_SIZE; } + if (!AR_SREV_9271(ah)) + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, txbuf_size); + REGWRITE_BUFFER_FLUSH(ah); if (AR_SREV_9300_20_OR_LATER(ah)) @@ -1306,9 +1311,13 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; } else { tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); - if (tmpReg & - (AR_INTR_SYNC_LOCAL_TIMEOUT | - AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { + if (AR_SREV_9340(ah)) + tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT; + else + tmpReg &= AR_INTR_SYNC_LOCAL_TIMEOUT | + AR_INTR_SYNC_RADM_CPL_TIMEOUT; + + if (tmpReg) { u32 val; REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 0237b2868961..2ba494567777 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -787,8 +787,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); - if (AR_SREV_5416(sc->sc_ah)) - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; @@ -830,10 +829,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) sc->ant_rx = hw->wiphy->available_antennas_rx; sc->ant_tx = hw->wiphy->available_antennas_tx; -#ifdef CONFIG_ATH9K_RATE_CONTROL - hw->rate_control_algorithm = "ath9k_rate_control"; -#endif - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ]; @@ -906,7 +901,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, if (!ath_is_world_regd(reg)) { error = regulatory_hint(hw->wiphy, reg->alpha2); if (error) - goto unregister; + goto debug_cleanup; } ath_init_leds(sc); @@ -914,6 +909,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, return 0; +debug_cleanup: + ath9k_deinit_debug(sc); unregister: ieee80211_unregister_hw(hw); rx_cleanup: @@ -942,11 +939,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) sc->dfs_detector->exit(sc->dfs_detector); ath9k_eeprom_release(sc); - - if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { - relay_close(sc->rfs_chan_spec_scan); - sc->rfs_chan_spec_scan = NULL; - } } void ath9k_deinit_device(struct ath_softc *sc) @@ -960,6 +952,7 @@ void ath9k_deinit_device(struct ath_softc *sc) ath9k_ps_restore(sc); + ath9k_deinit_debug(sc); ieee80211_unregister_hw(hw); ath_rx_cleanup(sc); ath9k_deinit_softc(sc); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 498fee04afa0..566109a40fb3 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -410,7 +410,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); - if (AR_SREV_9340(ah)) + if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah)) REG_WRITE(ah, AR_DMISC(q), AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1); else diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6963862a1872..5092ecae7706 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) goto 
work; - ath9k_set_beacon(sc); - if (ah->opmode == NL80211_IFTYPE_STATION && test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { spin_lock_irqsave(&sc->sc_pm_lock, flags); sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + } else { + ath9k_set_beacon(sc); } work: ath_restart_work(sc); @@ -1332,6 +1332,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; + int key; ath_node_attach(sc, sta, vif); @@ -1339,7 +1340,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, vif->type != NL80211_IFTYPE_AP_VLAN) return 0; - an->ps_key = ath_key_config(common, vif, sta, &ps_key); + key = ath_key_config(common, vif, sta, &ps_key); + if (key > 0) + an->ps_key = key; return 0; } @@ -1356,6 +1359,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc, return; ath_key_delete(common, &ps_key); + an->ps_key = 0; } static int ath9k_sta_remove(struct ieee80211_hw *hw, @@ -1683,6 +1687,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, u16 tid, u16 *ssn, u8 buf_size) { struct ath_softc *sc = hw->priv; + bool flush = false; int ret = 0; local_bh_disable(); @@ -1699,12 +1704,14 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); ath9k_ps_restore(sc); break; - case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + flush = true; + case IEEE80211_AMPDU_TX_STOP_CONT: ath9k_ps_wakeup(sc); ath_tx_aggr_stop(sc, sta, tid); - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + if (!flush) + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); ath9k_ps_restore(sc); break; case IEEE80211_AMPDU_TX_OPERATIONAL: diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index aa4d368d8d3d..7eb1f4b458e4 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1227,10 +1227,7 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta, return false; txtid = ATH_AN_2_TID(an, tidno); - - if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS))) - return true; - return false; + return !txtid->active; } diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h index 267dbfcfaa96..b9a87383cb43 100644 --- a/drivers/net/wireless/ath/ath9k/rc.h +++ b/drivers/net/wireless/ath/ath9k/rc.h @@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix, } #endif -#ifdef CONFIG_ATH9K_RATE_CONTROL +#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL int ath_rate_control_register(void); void ath_rate_control_unregister(void); #else diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 5c4ab5026dca..f7c90cc58d56 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -798,6 +798,10 @@ #define AR_SREV_REVISION_9485_10 0 #define AR_SREV_REVISION_9485_11 1 #define AR_SREV_VERSION_9340 0x300 +#define AR_SREV_REVISION_9340_10 0 +#define AR_SREV_REVISION_9340_11 1 +#define AR_SREV_REVISION_9340_12 2 +#define AR_SREV_REVISION_9340_13 3 #define AR_SREV_VERSION_9580 0x1C0 #define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */ #define AR_SREV_VERSION_9462 0x280 @@ -897,6 +901,10 @@ #define AR_SREV_9340(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340)) +#define AR_SREV_9340_13_OR_LATER(_ah) \ + 
(AR_SREV_9340((_ah)) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9340_13)) + #define AR_SREV_9285E_20(_ah) \ (AR_SREV_9285_12_OR_LATER(_ah) && \ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) @@ -1007,6 +1015,8 @@ enum { AR_INTR_SYNC_LOCAL_TIMEOUT | AR_INTR_SYNC_MAC_SLEEP_ACCESS), + AR9340_INTR_SYNC_LOCAL_TIMEOUT = 0x00000010, + AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF, }; @@ -1881,6 +1891,7 @@ enum { #define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF #define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700 #define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380 +#define AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE 0x500 #define AR_PCU_MISC_MODE2 0x8344 #define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index eab0fcb7ded6..1c9b1bac8b0d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -125,24 +125,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) list_add_tail(&ac->list, &txq->axq_acq); } -static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) -{ - struct ath_txq *txq = tid->ac->txq; - - WARN_ON(!tid->paused); - - ath_txq_lock(sc, txq); - tid->paused = false; - - if (skb_queue_empty(&tid->buf_q)) - goto unlock; - - ath_tx_queue_tid(txq, tid); - ath_txq_schedule(sc, txq); -unlock: - ath_txq_unlock_complete(sc, txq); -} - static struct ath_frame_info *get_frame_info(struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -201,11 +183,6 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) } } - if (tid->baw_head == tid->baw_tail) { - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; - } - if (sendbar) { ath_txq_unlock(sc, txq); ath_send_bar(tid, tid->seq_start); @@ -277,9 +254,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, list_add_tail(&bf->list, &bf_head); - if (fi->retries) - ath_tx_update_baw(sc, tid, bf->bf_state.seqno); - + ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); } @@ -491,19 +466,19 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, tx_info = IEEE80211_SKB_CB(skb); fi = get_frame_info(skb); - if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { + /* + * Outside of the current BlockAck window, + * maybe part of a previous session + */ + txfail = 1; + } else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { /* transmit completion, subframe is * acked by block ack */ acked_cnt++; } else if (!isaggr && txok) { /* transmit completion */ acked_cnt++; - } else if (tid->state & AGGR_CLEANUP) { - /* - * cleanup in progress, just fail - * the un-acked sub-frames - */ - txfail = 1; } else if (flush) { txpending = 1; } else if (fi->retries < ATH_MAX_SW_RETRIES) { @@ -527,7 +502,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (bf_next != NULL || !bf_last->bf_stale) list_move_tail(&bf->list, &bf_head); - if (!txpending || (tid->state & AGGR_CLEANUP)) { + if (!txpending) { /* * complete the acked-ones/xretried ones; update * block-ack window @@ -601,9 +576,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_txq_lock(sc, txq); } - if (tid->state & AGGR_CLEANUP) - ath_tx_flush_tid(sc, tid); - rcu_read_unlock(); if (needreset) @@ -620,6 +592,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, struct 
ath_tx_status *ts, struct ath_buf *bf, struct list_head *bf_head) { + struct ieee80211_tx_info *info; bool txok, flush; txok = !(ts->ts_status & ATH9K_TXERR_MASK); @@ -631,8 +604,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, txq->axq_ampdu_depth--; if (!bf_isampdu(bf)) { - if (!flush) + if (!flush) { + info = IEEE80211_SKB_CB(bf->bf_mpdu); + memcpy(info->control.rates, bf->rates, + sizeof(info->control.rates)); ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); + } ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); } else ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); @@ -676,7 +653,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, skb = bf->bf_mpdu; tx_info = IEEE80211_SKB_CB(skb); - rates = tx_info->control.rates; + rates = bf->rates; /* * Find the lowest frame length among the rate series that will have a @@ -1231,9 +1208,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an = (struct ath_node *)sta->drv_priv; txtid = ATH_AN_2_TID(an, tid); - if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE)) - return -EAGAIN; - /* update ampdu factor/density, they may have changed. This may happen * in HT IBSS when a beacon with HT-info is received after the station * has already been added. @@ -1245,7 +1219,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an->mpdudensity = density; } - txtid->state |= AGGR_ADDBA_PROGRESS; + txtid->active = true; txtid->paused = true; *ssn = txtid->seq_start = txtid->seq_next; txtid->bar_index = -1; @@ -1262,28 +1236,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); struct ath_txq *txq = txtid->ac->txq; - if (txtid->state & AGGR_CLEANUP) - return; - - if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { - txtid->state &= ~AGGR_ADDBA_PROGRESS; - return; - } - ath_txq_lock(sc, txq); + txtid->active = false; txtid->paused = true; - - /* - * If frames are still being transmitted for this TID, they will be - * cleaned up during tx completion. To prevent race conditions, this - * TID can only be reused after all in-progress subframes have been - * completed. 
- */ - if (txtid->baw_head != txtid->baw_tail) - txtid->state |= AGGR_CLEANUP; - else - txtid->state &= ~AGGR_ADDBA_COMPLETE; - ath_tx_flush_tid(sc, txtid); ath_txq_unlock_complete(sc, txq); } @@ -1349,18 +1304,28 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) } } -void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) +void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, + u16 tidno) { - struct ath_atx_tid *txtid; + struct ath_atx_tid *tid; struct ath_node *an; + struct ath_txq *txq; an = (struct ath_node *)sta->drv_priv; + tid = ATH_AN_2_TID(an, tidno); + txq = tid->ac->txq; - txtid = ATH_AN_2_TID(an, tid); - txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; - txtid->state |= AGGR_ADDBA_COMPLETE; - txtid->state &= ~AGGR_ADDBA_PROGRESS; - ath_tx_resume_tid(sc, txtid); + ath_txq_lock(sc, txq); + + tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; + tid->paused = false; + + if (!skb_queue_empty(&tid->buf_q)) { + ath_tx_queue_tid(txq, tid); + ath_txq_schedule(sc, txq); + } + + ath_txq_unlock_complete(sc, txq); } /********************/ @@ -2409,12 +2374,10 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->baw_head = tid->baw_tail = 0; tid->sched = false; tid->paused = false; - tid->state &= ~AGGR_CLEANUP; + tid->active = false; __skb_queue_head_init(&tid->buf_q); acno = TID_TO_WME_AC(tidno); tid->ac = &an->ac[acno]; - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_ADDBA_PROGRESS; } for (acno = 0, ac = &an->ac[acno]; @@ -2451,8 +2414,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) } ath_tid_drain(sc, txq, tid); - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; + tid->active = false; ath_txq_unlock(sc, txq); } diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 830bb1d1f957..b827d51c30a3 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1624,7 +1624,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, netif_carrier_off(dev); - if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv)); + if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv)) printk(KERN_WARNING "atmel: unable to create /proc entry.\n"); printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n", diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 523355b87659..f7c70b3a6ea9 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1728,6 +1728,25 @@ drop_recycle_buffer: sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); } +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring) +{ + int current_slot, previous_slot; + + B43_WARN_ON(ring->tx); + + /* Device has filled all buffers, drop all packets and let TCP + * decrease speed. + * Decrement RX index by one will let the device to see all slots + * as free again + */ + /* + *TODO: How to increase rx_drop in mac80211? + */ + current_slot = ring->ops->get_current_rxslot(ring); + previous_slot = prev_slot(ring, current_slot); + ring->ops->set_current_rxslot(ring, previous_slot); +} + void b43_dma_rx(struct b43_dmaring *ring) { const struct b43_dma_ops *ops = ring->ops; diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index 9fdd1983079c..df8c8cdcbdb5 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -9,7 +9,7 @@ /* DMA-Interrupt reasons. 
*/ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ | (1 << 14) | (1 << 15)) -#define B43_DMAIRQ_NONFATALMASK (1 << 13) +#define B43_DMAIRQ_RDESC_UFLOW (1 << 13) #define B43_DMAIRQ_RX_DONE (1 << 16) /*** 32-bit DMA Engine. ***/ @@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev, void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_txstatus *status); +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring); + void b43_dma_rx(struct b43_dmaring *ring); void b43_dma_direct_fifo_rx(struct b43_wldev *dev, diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index d377f77d30b5..a95b77ab360e 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) } } - if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | - B43_DMAIRQ_NONFATALMASK))) { - if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { - b43err(dev->wl, "Fatal DMA error: " - "0x%08X, 0x%08X, 0x%08X, " - "0x%08X, 0x%08X, 0x%08X\n", - dma_reason[0], dma_reason[1], - dma_reason[2], dma_reason[3], - dma_reason[4], dma_reason[5]); - b43err(dev->wl, "This device does not support DMA " + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) { + b43err(dev->wl, + "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43err(dev->wl, "This device does not support DMA " "on your system. It will now be switched to PIO.\n"); - /* Fall back to PIO transfers if we get fatal DMA errors! */ - dev->use_pio = true; - b43_controller_restart(dev, "DMA error"); - return; - } - if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { - b43err(dev->wl, "DMA error: " - "0x%08X, 0x%08X, 0x%08X, " - "0x%08X, 0x%08X, 0x%08X\n", - dma_reason[0], dma_reason[1], - dma_reason[2], dma_reason[3], - dma_reason[4], dma_reason[5]); - } + /* Fall back to PIO transfers if we get fatal DMA errors! */ + dev->use_pio = true; + b43_controller_restart(dev, "DMA error"); + return; } if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) @@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) handle_irq_noise(dev); /* Check the DMA reason registers for received data. 
*/ + if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) { + if (B43_DEBUG) + b43warn(dev->wl, "RX descriptor underrun\n"); + b43_dma_handle_rx_overflow(dev->dma.rx_ring); + } if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { if (b43_using_pio_transfers(dev)) b43_pio_rx(dev->pio.rx_queue); @@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) return IRQ_NONE; dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) - & 0x0001DC00; + & 0x0001FC00; dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) & 0x0000DC00; dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) @@ -2465,7 +2458,7 @@ static void b43_request_firmware(struct work_struct *work) for (i = 0; i < B43_NR_FWTYPES; i++) { errmsg = ctx->errors[i]; if (strlen(errmsg)) - b43err(dev->wl, errmsg); + b43err(dev->wl, "%s", errmsg); } b43_print_fw_helptext(dev->wl, 1); goto out; @@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev) b43_write32(dev, 0x018C, 0x02000000); } b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); - b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00); b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index be0787cab24f..9431af2465f3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -27,7 +27,6 @@ #include "tracepoint.h" #define PKTFILTER_BUF_SIZE 128 -#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ #define BRCMF_DEFAULT_BCN_TIMEOUT 3 #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 @@ -338,23 +337,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) goto done; } - /* Try to set and enable ARP offload feature, this may fail */ - err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE); - if (err) { - brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n", - BRCMF_ARPOL_MODE, err); - err = 0; - } else { - err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1); - if (err) { - brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n", - err); - err = 0; - } else - brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n", - BRCMF_ARPOL_MODE); - } - /* Setup packet filter */ brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER); brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER, diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 59c25463e428..b98f2235978e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -653,10 +653,13 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name); + ndev->destructor = free_netdev; return 0; fail: + drvr->iflist[ifp->bssidx] = NULL; ndev->netdev_ops = NULL; + free_netdev(ndev); return -EBADE; } @@ -720,6 +723,9 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp) return 0; fail: + ifp->drvr->iflist[ifp->bssidx] = NULL; + ndev->netdev_ops = NULL; + free_netdev(ndev); return -EBADE; } @@ -788,6 +794,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) struct brcmf_if *ifp; ifp = drvr->iflist[bssidx]; + drvr->iflist[bssidx] = NULL; if (!ifp) { brcmf_err("Null 
interface, idx=%d\n", bssidx); return; @@ -808,15 +815,13 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) cancel_work_sync(&ifp->setmacaddr_work); cancel_work_sync(&ifp->multicast_work); } - + /* unregister will take care of freeing it */ unregister_netdev(ifp->ndev); if (bssidx == 0) brcmf_cfg80211_detach(drvr->config); - free_netdev(ifp->ndev); } else { kfree(ifp); } - drvr->iflist[bssidx] = NULL; } int brcmf_attach(uint bus_hdrlen, struct device *dev) @@ -925,8 +930,6 @@ fail: brcmf_fws_del_interface(ifp); brcmf_fws_deinit(drvr); } - free_netdev(ifp->ndev); - drvr->iflist[0] = NULL; if (p2p_ifp) { free_netdev(p2p_ifp->ndev); drvr->iflist[1] = NULL; @@ -934,7 +937,8 @@ fail: return ret; } if ((brcmf_p2p_enable) && (p2p_ifp)) - brcmf_net_p2p_attach(p2p_ifp); + if (brcmf_net_p2p_attach(p2p_ifp) < 0) + brcmf_p2p_enable = 0; return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 5a64280e6485..83ee53a7c76e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -202,7 +202,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, return; brcmf_fws_add_interface(ifp); if (!drvr->fweh.evt_handler[BRCMF_E_IF]) - err = brcmf_net_attach(ifp, false); + if (brcmf_net_attach(ifp, false) < 0) + return; } if (ifevent->action == BRCMF_E_IF_CHANGE) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h index 0f2c83bc95dc..665ef69e974b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h @@ -23,6 +23,12 @@ #define BRCMF_FIL_ACTION_FRAME_SIZE 1800 +/* ARP Offload feature flags for arp_ol iovar */ +#define BRCMF_ARP_OL_AGENT 0x00000001 +#define BRCMF_ARP_OL_SNOOP 0x00000002 +#define BRCMF_ARP_OL_HOST_AUTO_REPLY 0x00000004 +#define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 + enum brcmf_fil_p2p_if_types { BRCMF_FIL_P2P_IF_CLIENT, diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index e7a1a4770996..79555f006d53 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -47,6 +47,7 @@ #define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \ (channel == SOCIAL_CHAN_2) || \ (channel == SOCIAL_CHAN_3)) +#define BRCMF_P2P_TEMP_CHAN SOCIAL_CHAN_3 #define SOCIAL_CHAN_CNT 3 #define AF_PEER_SEARCH_CNT 2 @@ -1954,21 +1955,21 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg) err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); if (err < 0) { brcmf_err("set p2p_disc error\n"); - brcmf_free_vif(p2p_vif); + brcmf_free_vif(cfg, p2p_vif); goto exit; } /* obtain bsscfg index for P2P discovery */ err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx); if (err < 0) { brcmf_err("retrieving discover bsscfg index failed\n"); - brcmf_free_vif(p2p_vif); + brcmf_free_vif(cfg, p2p_vif); goto exit; } /* Verify that firmware uses same bssidx as driver !! 
*/ if (p2p_ifp->bssidx != bssidx) { brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n", bssidx, p2p_ifp->bssidx); - brcmf_free_vif(p2p_vif); + brcmf_free_vif(cfg, p2p_vif); goto exit; } @@ -1996,7 +1997,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p) brcmf_p2p_cancel_remain_on_channel(vif->ifp); brcmf_p2p_deinit_discovery(p2p); /* remove discovery interface */ - brcmf_free_vif(vif); + brcmf_free_vif(p2p->cfg, vif); p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; } /* just set it all to zero */ @@ -2013,17 +2014,30 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p, u16 *chanspec) { struct brcmf_if *ifp; - struct brcmf_fil_chan_info_le ci; + u8 mac_addr[ETH_ALEN]; struct brcmu_chan ch; - s32 err; + struct brcmf_bss_info_le *bi; + u8 *buf; ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - ch.chnum = 11; - - err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci)); - if (!err) - ch.chnum = le32_to_cpu(ci.hw_channel); + if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mac_addr, + ETH_ALEN) == 0) { + buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); + if (buf != NULL) { + *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); + if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, + buf, WL_BSS_INFO_MAX) == 0) { + bi = (struct brcmf_bss_info_le *)(buf + 4); + *chanspec = le16_to_cpu(bi->chanspec); + kfree(buf); + return; + } + kfree(buf); + } + } + /* Use default channel for P2P */ + ch.chnum = BRCMF_P2P_TEMP_CHAN; ch.bw = BRCMU_CHAN_BW_20; p2p->cfg->d11inf.encchspec(&ch); *chanspec = ch.chspec; @@ -2208,7 +2222,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, return &p2p_vif->wdev; fail: - brcmf_free_vif(p2p_vif); + brcmf_free_vif(p2p->cfg, p2p_vif); return ERR_PTR(err); } @@ -2217,13 +2231,31 @@ fail: * * @vif: virtual interface object to delete. */ -static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_vif *vif) +static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_info *cfg, + struct brcmf_cfg80211_vif *vif) { - struct brcmf_p2p_info *p2p = &vif->ifp->drvr->config->p2p; - cfg80211_unregister_wdev(&vif->wdev); - p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; - brcmf_free_vif(vif); + cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; + brcmf_free_vif(cfg, vif); +} + +/** + * brcmf_p2p_free_p2p_if() - free up net device related data. + * + * @ndev: net device that needs to be freed. 
+ */ +static void brcmf_p2p_free_p2p_if(struct net_device *ndev) +{ + struct brcmf_cfg80211_info *cfg; + struct brcmf_cfg80211_vif *vif; + struct brcmf_if *ifp; + + ifp = netdev_priv(ndev); + cfg = ifp->drvr->config; + vif = ifp->vif; + + brcmf_free_vif(cfg, vif); + free_netdev(ifp->ndev); } /** @@ -2303,6 +2335,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, brcmf_err("Registering netdevice failed\n"); goto fail; } + /* override destructor */ + ifp->ndev->destructor = brcmf_p2p_free_p2p_if; + cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif; /* Disable firmware roaming for P2P interface */ brcmf_fil_iovar_int_set(ifp, "roam_off", 1); @@ -2314,7 +2349,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, return &ifp->vif->wdev; fail: - brcmf_free_vif(vif); + brcmf_free_vif(cfg, vif); return ERR_PTR(err); } @@ -2350,7 +2385,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) break; case NL80211_IFTYPE_P2P_DEVICE: - brcmf_p2p_delete_p2pdev(vif); + brcmf_p2p_delete_p2pdev(cfg, vif); return 0; default: return -ENOTSUPP; @@ -2378,7 +2413,6 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) err = 0; } brcmf_cfg80211_arm_vif_event(cfg, NULL); - brcmf_free_vif(vif); p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; return err; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 6d758f285352..301e572e8923 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -459,6 +459,38 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key) return err; } +static s32 +brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) +{ + s32 err; + u32 mode; + + if (enable) + mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY; + else + mode = 0; + + /* Try to set and enable ARP offload feature, this may fail, then it */ + /* is simply not supported and err 0 will be returned */ + err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode); + if (err) { + brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n", + mode, err); + err = 0; + } else { + err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable); + if (err) { + brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n", + enable, err); + err = 0; + } else + brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n", + enable, mode); + } + + return err; +} + static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, const char *name, enum nl80211_iftype type, @@ -2216,6 +2248,11 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, } pm = enabled ? PM_FAST : PM_OFF; + /* Do not enable the power save after assoc if it is a p2p interface */ + if (ifp->vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) { + brcmf_dbg(INFO, "Do not enable power save for P2P clients\n"); + pm = PM_OFF; + } brcmf_dbg(INFO, "power save %s\n", (pm ? 
"enabled" : "disabled")); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm); @@ -3640,10 +3677,28 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif, } static s32 +brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg, + struct brcmf_if *ifp, + struct ieee80211_channel *channel) +{ + u16 chanspec; + s32 err; + + brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band, + channel->center_freq); + + chanspec = channel_to_chanspec(&cfg->d11inf, channel); + err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); + + return err; +} + +static s32 brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *settings) { s32 ie_offset; + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_tlv *ssid_ie; struct brcmf_ssid_le ssid_le; @@ -3683,6 +3738,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } brcmf_set_mpc(ifp, 0); + brcmf_configure_arp_offload(ifp, false); /* find the RSN_IE */ rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, @@ -3713,6 +3769,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); + err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan); + if (err < 0) { + brcmf_err("Set Channel failed, %d\n", err); + goto exit; + } + if (settings->beacon_interval) { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, settings->beacon_interval); @@ -3789,8 +3851,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); exit: - if (err) + if (err) { brcmf_set_mpc(ifp, 1); + brcmf_configure_arp_offload(ifp, true); + } return err; } @@ -3831,6 +3895,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) brcmf_err("bss_enable config failed %d\n", err); } brcmf_set_mpc(ifp, 1); + brcmf_configure_arp_offload(ifp, true); set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); @@ -4140,11 +4205,15 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = { .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + } }; static const struct ieee80211_iface_combination brcmf_iface_combos[] = { { .max_interfaces = BRCMF_IFACE_MAX_CNT, - .num_different_channels = 1, /* no multi-channel for now */ + .num_different_channels = 2, .n_limits = ARRAY_SIZE(brcmf_iface_limits), .limits = brcmf_iface_limits } @@ -4197,7 +4266,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO); + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_DEVICE); wiphy->iface_combinations = brcmf_iface_combos; wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; @@ -4251,20 +4321,16 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, return vif; } -void brcmf_free_vif(struct brcmf_cfg80211_vif *vif) +void brcmf_free_vif(struct brcmf_cfg80211_info *cfg, + struct brcmf_cfg80211_vif *vif) { - struct brcmf_cfg80211_info *cfg; - struct wiphy *wiphy; - - wiphy = vif->wdev.wiphy; - cfg = wiphy_priv(wiphy); list_del(&vif->list); cfg->vif_cnt--; kfree(vif); if (!cfg->vif_cnt) { - wiphy_unregister(wiphy); - wiphy_free(wiphy); + 
wiphy_unregister(cfg->wiphy); + wiphy_free(cfg->wiphy); } } @@ -4641,7 +4707,6 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, return 0; case BRCMF_E_IF_DEL: - ifp->vif = NULL; mutex_unlock(&event->vif_event_lock); /* event may not be upon user request */ if (brcmf_cfg80211_vif_event_armed(cfg)) @@ -4847,8 +4912,7 @@ cfg80211_p2p_attach_out: wl_deinit_priv(cfg); cfg80211_attach_out: - brcmf_free_vif(vif); - wiphy_free(wiphy); + brcmf_free_vif(cfg, vif); return NULL; } @@ -4860,7 +4924,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) wl_deinit_priv(cfg); brcmf_btcoex_detach(cfg); list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) { - brcmf_free_vif(vif); + brcmf_free_vif(cfg, vif); } } @@ -5224,6 +5288,8 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) if (err) goto default_conf_out; + brcmf_configure_arp_offload(ifp, true); + cfg->dongle_up = true; default_conf_out: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h index a71cff84cdcf..d9bdaf9a72d0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h @@ -487,7 +487,8 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, enum nl80211_iftype type, bool pm_block); -void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); +void brcmf_free_vif(struct brcmf_cfg80211_info *cfg, + struct brcmf_cfg80211_vif *vif); s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, const u8 *vndr_ie_buf, u32 vndr_ie_len); diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b8f82e688c72..9a95045c97b6 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -5741,8 +5741,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS; + IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; if (il->cfg->sku & IL_SKU_N) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 592d0aa634a8..e9a3cbc409ae 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -1423,7 +1423,7 @@ il_setup_rx_scan_handlers(struct il_priv *il) } EXPORT_SYMBOL(il_setup_rx_scan_handlers); -inline u16 +u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, u8 n_probes) { diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h index f8246f2d88f9..4caaf52986a4 100644 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/iwlegacy/common.h @@ -1832,16 +1832,16 @@ u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval); __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, u32 beacon_interval); -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP extern const struct dev_pm_ops il_pm_ops; #define IL_LEGACY_PM_OPS (&il_pm_ops) -#else /* !CONFIG_PM */ +#else /* !CONFIG_PM_SLEEP */ #define IL_LEGACY_PM_OPS NULL -#endif /* !CONFIG_PM */ +#endif /* !CONFIG_PM_SLEEP */ /***************************************************** * Error 
Handling Debugging diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index db183b44e038..c3c13ce96eb0 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -735,7 +735,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) memcpy(&lq, priv->stations[i].lq, sizeof(struct iwl_link_quality_cmd)); - if (!memcmp(&lq, &zero_lq, sizeof(lq))) + if (memcmp(&lq, &zero_lq, sizeof(lq))) send_lq = true; } spin_unlock_bh(&priv->sta_lock); diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 191dcae8ba47..c6384555aab4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -173,6 +173,8 @@ enum { REPLY_DEBUG_CMD = 0xf0, DEBUG_LOG_MSG = 0xf7, + MCAST_FILTER_CMD = 0xd0, + /* D3 commands/notifications */ D3_CONFIG_CMD = 0xd3, PROT_OFFLOAD_CONFIG_CMD = 0xd4, @@ -948,4 +950,29 @@ struct iwl_set_calib_default_cmd { u8 data[0]; } __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ +#define MAX_PORT_ID_NUM 2 + +/** + * struct iwl_mcast_filter_cmd - configure multicast filter. + * @filter_own: Set 1 to filter out multicast packets sent by station itself + * @port_id: Multicast MAC addresses array specifier. This is a strange way + * to identify network interface adopted in host-device IF. + * It is used by FW as index in array of addresses. This array has + * MAX_PORT_ID_NUM members. + * @count: Number of MAC addresses in the array + * @pass_all: Set 1 to pass all multicast packets. + * @bssid: current association BSSID. + * @addr_list: Place holder for array of MAC addresses. + * IMPORTANT: add padding if necessary to ensure DWORD alignment. + */ +struct iwl_mcast_filter_cmd { + u8 filter_own; + u8 port_id; + u8 count; + u8 pass_all; + u8 bssid[6]; + u8 reserved[2]; + u8 addr_list[0]; +} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index e6eca4d66f6c..b2cc3d98e0f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -586,10 +586,12 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, */ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mac_data_sta *ctxt_sta) + struct iwl_mac_data_sta *ctxt_sta, + bool force_assoc_off) { /* We need the dtim_period to set the MAC as associated */ - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) { + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && + !force_assoc_off) { u32 dtim_offs; /* @@ -659,7 +661,8 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm, cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta, + action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } @@ -677,7 +680,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta, + action == FW_CTXT_ACTION_ADD); cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); diff --git 
a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index dd158ec571fb..a5eb8c82f16a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -701,6 +701,20 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, *total_flags = 0; } +static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mcast_filter_cmd mcast_filter_cmd = { + .pass_all = 1, + }; + + memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN); + + return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, + sizeof(mcast_filter_cmd), + &mcast_filter_cmd); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -722,6 +736,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, return; } iwl_mvm_bt_coex_vif_assoc(mvm, vif); + iwl_mvm_configure_mcast_filter(mvm, vif); } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { /* remove AP station now that the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); @@ -931,7 +946,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, switch (cmd) { case STA_NOTIFY_SLEEP: - if (atomic_read(&mvmsta->pending_frames) > 0) + if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) ieee80211_sta_block_awake(hw, sta, true); /* * The fw updates the STA to be asleep. Tx packets on the Tx diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 8269bc562951..9f46b23801bc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -292,6 +292,7 @@ struct iwl_mvm { struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; struct work_struct sta_drained_wk; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; + atomic_t pending_frames[IWL_MVM_STATION_COUNT]; /* configured by mac80211 */ u32 rts_threshold; diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index fe031d304d1e..b29c31a41594 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -292,6 +292,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BT_COEX_PROT_ENV), CMD(BT_PROFILE_NOTIFICATION), CMD(BT_CONFIG), + CMD(MCAST_FILTER_CMD), }; #undef CMD diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 2157b0f8ced5..2476e43799d5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -298,6 +298,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm, else cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); + /* + * TODO: This is a WA due to a bug in the FW AUX framework that does not + * properly handle time events that fail to be scheduled + */ + cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); + cmd->repeats = cpu_to_le32(1); /* diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 0fd96e4da461..5c664ed54400 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -219,7 +219,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm_sta->pending_frames, 0); + atomic_set(&mvm->pending_frames[sta_id], 0); mvm_sta->tid_disable_agg = 0; mvm_sta->tfd_queue_msk = 0; for (i = 0; i < 
IEEE80211_NUM_ACS; i++) @@ -407,14 +407,21 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, } /* + * Make sure that the tx response code sees the station as -EBUSY and + * calls the drain worker. + */ + spin_lock_bh(&mvm_sta->lock); + /* * There are frames pending on the AC queues for this station. * We need to wait until all the frames are drained... */ - if (atomic_read(&mvm_sta->pending_frames)) { - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-EBUSY)); + spin_unlock_bh(&mvm_sta->lock); + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); } else { + spin_unlock_bh(&mvm_sta->lock); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); } diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index 12abd2d71835..a4ddce77aaae 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -274,7 +274,6 @@ struct iwl_mvm_tid_data { * @bt_reduced_txpower: is reduced tx power enabled for this station * @lock: lock to protect the whole struct. Since %tid_data is access from Tx * and from Tx response flow, it needs a spinlock. - * @pending_frames: number of frames for this STA on the shared Tx queues. * @tid_data: per tid data. Look at %iwl_mvm_tid_data. * * When mac80211 creates a station it reserves some space (hw->sta_data_size) @@ -290,7 +289,6 @@ struct iwl_mvm_sta { u8 max_agg_bufsize; bool bt_reduced_txpower; spinlock_t lock; - atomic_t pending_frames; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 479074303bd7..f212f16502ff 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -416,9 +416,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - if (mvmsta->vif->type == NL80211_IFTYPE_AP && - txq_id < IWL_MVM_FIRST_AGG_QUEUE) - atomic_inc(&mvmsta->pending_frames); + if (txq_id < IWL_MVM_FIRST_AGG_QUEUE) + atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -680,16 +679,41 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... - * If there are no pending frames for this STA, notify mac80211 that - * this station can go to sleep in its STA table. */ - if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta && - !WARN_ON(skb_freed > 1) && - mvmsta->vif->type == NL80211_IFTYPE_AP && - atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { - ieee80211_sta_block_awake(mvm->hw, sta, false); - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); + if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) && + atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { + if (mvmsta) { + /* + * If there are no pending frames for this STA, notify + * mac80211 that this station can go to sleep in its + * STA table. + */ + if (mvmsta->vif->type == NL80211_IFTYPE_AP) + ieee80211_sta_block_awake(mvm->hw, sta, false); + /* + * We might very well have taken mvmsta pointer while + * the station was being removed. The remove flow might + * have seen a pending_frame (because we didn't take + * the lock) even if now the queues are drained. 
So make + * really sure now that this the station is not being + * removed. If it is, run the drain worker to remove it. + */ + spin_lock_bh(&mvmsta->lock); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) { + /* + * Station disappeared in the meantime: + * so we are draining. + */ + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } + spin_unlock_bh(&mvmsta->lock); + } else if (!mvmsta) { + /* Tx response without STA, so we are draining */ + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } } rcu_read_unlock(); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index b878a32e7a98..cb34c7895f2a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1723,11 +1723,11 @@ static void mac80211_hwsim_free(void) class_destroy(hwsim_class); } - -static struct device_driver mac80211_hwsim_driver = { - .name = "mac80211_hwsim", - .bus = &platform_bus_type, - .owner = THIS_MODULE, +static struct platform_driver mac80211_hwsim_driver = { + .driver = { + .name = "mac80211_hwsim", + .owner = THIS_MODULE, + }, }; static const struct net_device_ops hwsim_netdev_ops = { @@ -2219,7 +2219,7 @@ static int __init init_mac80211_hwsim(void) spin_lock_init(&hwsim_radio_lock); INIT_LIST_HEAD(&hwsim_radios); - err = driver_register(&mac80211_hwsim_driver); + err = platform_driver_register(&mac80211_hwsim_driver); if (err) return err; @@ -2254,7 +2254,7 @@ static int __init init_mac80211_hwsim(void) err = -ENOMEM; goto failed_drvdata; } - data->dev->driver = &mac80211_hwsim_driver; + data->dev->driver = &mac80211_hwsim_driver.driver; err = device_bind_driver(data->dev); if (err != 0) { printk(KERN_DEBUG @@ -2564,7 +2564,7 @@ failed_drvdata: failed: mac80211_hwsim_free(); failed_unregister_driver: - driver_unregister(&mac80211_hwsim_driver); + platform_driver_unregister(&mac80211_hwsim_driver); return err; } module_init(init_mac80211_hwsim); @@ -2577,6 +2577,6 @@ static void __exit exit_mac80211_hwsim(void) mac80211_hwsim_free(); unregister_netdev(hwsim_mon); - driver_unregister(&mac80211_hwsim_driver); + platform_driver_unregister(&mac80211_hwsim_driver); } module_exit(exit_mac80211_hwsim); diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index d3c8ece980d8..e42b266a023a 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2234,9 +2234,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) if (wdev->netdev->reg_state == NETREG_REGISTERED) unregister_netdevice(wdev->netdev); - if (wdev->netdev->reg_state == NETREG_UNREGISTERED) - free_netdev(wdev->netdev); - /* Clear the priv in adapter */ priv->netdev = NULL; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 74db0d24a579..26755d9acb55 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -1191,6 +1191,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; adapter->is_hs_configured = false; + adapter->is_suspended = false; mwifiex_hs_activated_event(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), false); diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 753b5682d53f..a5f9875cfd6e 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ 
b/drivers/net/wireless/mwifiex/debugfs.c @@ -26,10 +26,17 @@ static struct dentry *mwifiex_dfs_dir; static char *bss_modes[] = { - "Unknown", - "Ad-hoc", - "Managed", - "Auto" + "UNSPECIFIED", + "ADHOC", + "STATION", + "AP", + "AP_VLAN", + "WDS", + "MONITOR", + "MESH_POINT", + "P2P_CLIENT", + "P2P_GO", + "P2P_DEVICE", }; /* size/addr for mwifiex_debug_info */ @@ -200,7 +207,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf, p += sprintf(p, "driver_version = %s", fmt); p += sprintf(p, "\nverext = %s", priv->version_str); p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name); - p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); + + if (info.bss_mode >= ARRAY_SIZE(bss_modes)) + p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode); + else + p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); + p += sprintf(p, "media_state=\"%s\"\n", (!priv->media_connected ? "Disconnected" : "Connected")); p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr); diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 121443a0f2a1..2eb88ea9acf7 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -655,6 +655,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev) { dev->netdev_ops = &mwifiex_netdev_ops; + dev->destructor = free_netdev; /* Initialize private structure */ priv->current_key_index = 0; priv->media_connected = false; diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 311d0b26b81c..1a8a19dbd635 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, } else { /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; - if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { + if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { dev_dbg(priv->adapter->dev, "info: Enabling All Multicast!\n"); priv->curr_pkt_filter |= @@ -108,20 +108,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, dev_dbg(priv->adapter->dev, "info: Set multicast list=%d\n", mcast_list->num_multicast_addr); - /* Set multicast addresses to firmware */ - if (old_pkt_filter == priv->curr_pkt_filter) { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } else { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } + /* Send multicast addresses to firmware */ + ret = mwifiex_send_cmd_async(priv, + HostCmd_CMD_MAC_MULTICAST_ADR, + HostCmd_ACT_GEN_SET, 0, + mcast_list); } } } diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 999ffc12578b..c97e9d327331 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -764,6 +764,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) "can't alloc skb for rx\n"); goto done; } + kmemleak_not_leak(new_skb); pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb), diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h index d3a02e73f53a..21ca33a7c770 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h @@ -550,7 +550,7 @@ do { \ rxmcs == DESC92C_RATE11M) struct 
phy_rx_agc_info_t { - #if __LITTLE_ENDIAN + #ifdef __LITTLE_ENDIAN u8 gain:7, trsw:1; #else u8 trsw:1, gain:7; @@ -574,7 +574,7 @@ struct phy_status_rpt { u8 stream_target_csi[2]; u8 sig_evm; u8 rsvd_3; -#if __LITTLE_ENDIAN +#ifdef __LITTLE_ENDIAN u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ u8 sgi_en:1; u8 rxsc:2; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 3d0498e69c8c..189ba124a8c6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1973,26 +1973,35 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } } -void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level) +static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u32 ratr_value = (u32) mac->basic_rates; - u8 *mcsrate = mac->mcs; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 ratr_value; u8 ratr_index = 0; u8 nmode = mac->ht_enable; - u8 mimo_ps = 1; - u16 shortgi_rate = 0; - u32 tmp_ratr_value = 0; + u8 mimo_ps = IEEE80211_SMPS_OFF; + u16 shortgi_rate; + u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = mac->sgi_40; - u8 curshortgi_20mhz = mac->sgi_20; + u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + 1 : 0; enum wireless_mode wirelessmode = mac->mode; - ratr_value |= ((*(u16 *) (mcsrate))) << 12; + if (rtlhal->current_bandtype == BAND_ON_5G) + ratr_value = sta->supp_rates[1] << 4; + else + ratr_value = sta->supp_rates[0]; + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_value = 0xfff; + + ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -2006,7 +2015,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: nmode = 1; - if (mimo_ps == 0) { + if (mimo_ps == IEEE80211_SMPS_STATIC) { ratr_value &= 0x0007F005; } else { u32 ratr_mask; @@ -2016,8 +2025,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, ratr_mask = 0x000ff005; else ratr_mask = 0x0f0ff005; - if (curtxbw_40mhz) - ratr_mask |= 0x00000010; + ratr_value &= ratr_mask; } break; @@ -2026,41 +2034,74 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, ratr_value &= 0x000ff0ff; else ratr_value &= 0x0f0ff0ff; + break; } + ratr_value &= 0x0FFFFFFF; - if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || - (!curtxbw_40mhz && curshortgi_20mhz))) { + + if (nmode && ((curtxbw_40mhz && + curshortgi_40mhz) || (!curtxbw_40mhz && + curshortgi_20mhz))) { + ratr_value |= 0x10000000; tmp_ratr_value = (ratr_value >> 12); + for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { if ((1 << shortgi_rate) & tmp_ratr_value) break; } + shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | - (shortgi_rate << 4) | (shortgi_rate); + (shortgi_rate << 4) | (shortgi_rate); } + rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", + rtl_read_dword(rtlpriv, REG_ARFR0)); } -void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) +static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 
rssi_level) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u32 ratr_bitmap = (u32) mac->basic_rates; - u8 *p_mcsrate = mac->mcs; - u8 ratr_index = 0; - u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = mac->sgi_40; - u8 curshortgi_20mhz = mac->sgi_20; - enum wireless_mode wirelessmode = mac->mode; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_sta_info *sta_entry = NULL; + u32 ratr_bitmap; + u8 ratr_index; + u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; + u8 curshortgi_40mhz = curtxbw_40mhz && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + 1 : 0; + enum wireless_mode wirelessmode = 0; bool shortgi = false; u8 rate_mask[5]; u8 macid = 0; - u8 mimops = 1; - - ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12); + u8 mimo_ps = IEEE80211_SMPS_OFF; + + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + wirelessmode = sta_entry->wireless_mode; + if (mac->opmode == NL80211_IFTYPE_STATION || + mac->opmode == NL80211_IFTYPE_MESH_POINT) + curtxbw_40mhz = mac->bw_40; + else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) + macid = sta->aid + 1; + + if (rtlhal->current_bandtype == BAND_ON_5G) + ratr_bitmap = sta->supp_rates[1] << 4; + else + ratr_bitmap = sta->supp_rates[0]; + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_bitmap = 0xfff; + ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; @@ -2071,6 +2112,7 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) break; case WIRELESS_MODE_G: ratr_index = RATR_INX_WIRELESS_GB; + if (rssi_level == 1) ratr_bitmap &= 0x00000f00; else if (rssi_level == 2) @@ -2085,7 +2127,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: ratr_index = RATR_INX_WIRELESS_NGB; - if (mimops == 0) { + + if (mimo_ps == IEEE80211_SMPS_STATIC) { if (rssi_level == 1) ratr_bitmap &= 0x00070000; else if (rssi_level == 2) @@ -2128,8 +2171,10 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) } } } + if ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz && curshortgi_20mhz)) { + if (macid == 0) shortgi = true; else if (macid == 1) @@ -2138,21 +2183,42 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) break; default: ratr_index = RATR_INX_WIRELESS_NGB; + if (rtlphy->rf_type == RF_1T2R) ratr_bitmap &= 0x000ff0ff; else ratr_bitmap &= 0x0f0ff0ff; break; } - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", - ratr_bitmap); - *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) | - ratr_index << 28); + sta_entry->ratr_index = ratr_index; + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "ratr_bitmap :%x\n", ratr_bitmap); + *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | + (ratr_index << 28); rate_mask[4] = macid | (shortgi ? 
0x20 : 0x00) | 0x80; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "Rate_index:%x, ratr_val:%x, %5phC\n", ratr_index, ratr_bitmap, rate_mask); - rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); + memcpy(rtlpriv->rate_mask, rate_mask, 5); + /* rtl92c_fill_h2c_cmd() does USB I/O and will result in a + * "scheduled while atomic" if called directly */ + schedule_work(&rtlpriv->works.fill_h2c_cmd); + + if (macid != 0) + sta_entry->ratr_index = ratr_index; +} + +void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->dm.useramask) + rtl92cu_update_hal_rate_mask(hw, sta, rssi_level); + else + rtl92cu_update_hal_rate_table(hw, sta); } void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h index f41a3aa4a26f..8e3ec1e25644 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h @@ -98,10 +98,6 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw, u32 add_msr, u32 rm_msr); void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); -void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level); -void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level); void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw); bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 85b6bdb163c0..da4f587199ee 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -289,14 +289,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, macaddr = cam_const_broad; entry_id = key_index; } else { + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + entry_id = rtl_cam_get_free_entry(hw, + p_macaddr); + if (entry_id >= TOTAL_CAM_ENTRY) { + RT_TRACE(rtlpriv, COMP_SEC, + DBG_EMERG, + "Can not find free hw security cam entry\n"); + return; + } + } else { + entry_id = CAM_PAIRWISE_KEY_POSITION; + } + key_index = PAIRWISE_KEYIDX; - entry_id = CAM_PAIRWISE_KEY_POSITION; is_pairwise = true; } } if (rtlpriv->sec.key_len[key_index] == 0) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "delete one entry\n"); + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_MESH_POINT) + rtl_cam_del_entry(hw, p_macaddr); rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); } else { RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 23d640a4debd..826f085c29dd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -106,8 +106,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { .update_interrupt_mask = rtl92cu_update_interrupt_mask, .get_hw_reg = rtl92cu_get_hw_reg, .set_hw_reg = rtl92cu_set_hw_reg, - .update_rate_tbl = rtl92cu_update_hal_rate_table, - .update_rate_mask = rtl92cu_update_hal_rate_mask, + .update_rate_tbl = rtl92cu_update_hal_rate_tbl, .fill_tx_desc = rtl92cu_tx_fill_desc, .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, @@ -137,6 +136,7 @@ static struct rtl_hal_ops 
rtl8192cu_hal_ops = { .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate, .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, + .fill_h2c_cmd = rtl92c_fill_h2c_cmd, }; static struct rtl_mod_params rtl92cu_mod_params = { @@ -349,6 +349,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/ + {RTL_USB_DEVICE(0x0846, 0xf001, rtl92cu_hal_cfg)}, /*On Netwrks N300MA*/ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/ diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h index a1310abd0d54..262e1e4c6e5b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h @@ -49,5 +49,8 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask); void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); +void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level); #endif diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 76732b0cd221..a3532e077871 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -824,6 +824,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) /* should after adapter start and interrupt enable. */ set_hal_stop(rtlhal); + cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); /* Enable software */ SET_USB_STOP(rtlusb); rtl_usb_deinit(hw); @@ -1026,6 +1027,16 @@ static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, return false; } +static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work) +{ + struct rtl_works *rtlworks = + container_of(work, struct rtl_works, fill_h2c_cmd); + struct ieee80211_hw *hw = rtlworks->hw; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask); +} + static struct rtl_intf_ops rtl_usb_ops = { .adapter_start = rtl_usb_start, .adapter_stop = rtl_usb_stop, @@ -1057,6 +1068,8 @@ int rtl_usb_probe(struct usb_interface *intf, /* this spin lock must be initialized early */ spin_lock_init(&rtlpriv->locks.usb_lock); + INIT_WORK(&rtlpriv->works.fill_h2c_cmd, + rtl_fill_h2c_cmd_work_callback); rtlpriv->usb_data_index = 0; init_completion(&rtlpriv->firmware_loading_complete); diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 44328baa6389..cc03e7c87cbe 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -1736,6 +1736,8 @@ struct rtl_hal_ops { void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, bool mstate); void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); + void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer); }; struct rtl_intf_ops { @@ -1869,6 +1871,7 @@ struct rtl_works { struct delayed_work fwevt_wq; struct work_struct lps_change_work; + struct work_struct fill_h2c_cmd; }; struct rtl_debug { @@ -2048,6 +2051,7 @@ struct rtl_priv { }; }; bool enter_ps; /* true when entering PS */ + u8 rate_mask[5]; /*This must 
be the last item so that it points to the data allocated diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c index affdb3ec6225..4a0bbb13806b 100644 --- a/drivers/net/wireless/ti/wl12xx/scan.c +++ b/drivers/net/wireless/ti/wl12xx/scan.c @@ -310,7 +310,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd, memcpy(cmd->channels_2, cmd_channels->channels_2, sizeof(cmd->channels_2)); memcpy(cmd->channels_5, cmd_channels->channels_5, - sizeof(cmd->channels_2)); + sizeof(cmd->channels_5)); /* channels_4 are not supported, so no need to copy them */ } diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h index 222d03540200..9e5484a73667 100644 --- a/drivers/net/wireless/ti/wl12xx/wl12xx.h +++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h @@ -36,12 +36,12 @@ #define WL127X_IFTYPE_SR_VER 3 #define WL127X_MAJOR_SR_VER 10 #define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE -#define WL127X_MINOR_SR_VER 115 +#define WL127X_MINOR_SR_VER 133 /* minimum multi-role FW version for wl127x */ #define WL127X_IFTYPE_MR_VER 5 #define WL127X_MAJOR_MR_VER 7 #define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE -#define WL127X_MINOR_MR_VER 115 +#define WL127X_MINOR_MR_VER 42 /* FW chip version for wl128x */ #define WL128X_CHIP_VER 7 @@ -49,7 +49,7 @@ #define WL128X_IFTYPE_SR_VER 3 #define WL128X_MAJOR_SR_VER 10 #define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE -#define WL128X_MINOR_SR_VER 115 +#define WL128X_MINOR_SR_VER 133 /* minimum multi-role FW version for wl128x */ #define WL128X_IFTYPE_MR_VER 5 #define WL128X_MAJOR_MR_VER 7 diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c index 09d944505ac0..2b642f8c9266 100644 --- a/drivers/net/wireless/ti/wl18xx/scan.c +++ b/drivers/net/wireless/ti/wl18xx/scan.c @@ -34,7 +34,7 @@ static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd, memcpy(cmd->channels_2, cmd_channels->channels_2, sizeof(cmd->channels_2)); memcpy(cmd->channels_5, cmd_channels->channels_5, - sizeof(cmd->channels_2)); + sizeof(cmd->channels_5)); /* channels_4 are not supported, so no need to copy them */ } diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index 4775d4e61b88..74a852e4e41f 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -28,7 +28,7 @@ config NFC_WILINK config NFC_MEI_PHY tristate "MEI bus NFC device support" - depends on INTEL_MEI_BUS_NFC && NFC_HCI + depends on INTEL_MEI && NFC_HCI help This adds support to use an mei bus nfc device. 
Select this if you will use an HCI NFC driver for an NFC chip connected behind an diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index b8f8abc422f0..1201bdbfb791 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c @@ -64,6 +64,15 @@ int nfc_mei_phy_enable(void *phy_id) return r; } + r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy); + if (r) { + pr_err("MEY_PHY: Event cb registration failed\n"); + mei_cl_disable_device(phy->device); + phy->powered = 0; + + return r; + } + phy->powered = 1; return 0; diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index 1ad044dce7b6..cdf1bc53b257 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c @@ -43,24 +43,16 @@ static int microread_mei_probe(struct mei_cl_device *device, return -ENOMEM; } - r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy); - if (r) { - pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); - goto err_out; - } - r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, &phy->hdev); - if (r < 0) - goto err_out; - - return 0; + if (r < 0) { + nfc_mei_phy_free(phy); -err_out: - nfc_mei_phy_free(phy); + return r; + } - return r; + return 0; } static int microread_mei_remove(struct mei_cl_device *device) @@ -71,8 +63,6 @@ static int microread_mei_remove(struct mei_cl_device *device) microread_remove(phy->hdev); - nfc_mei_phy_disable(phy); - nfc_mei_phy_free(phy); return 0; diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c index 1eb48848a35a..b5d3d18179eb 100644 --- a/drivers/nfc/pn544/mei.c +++ b/drivers/nfc/pn544/mei.c @@ -43,24 +43,16 @@ static int pn544_mei_probe(struct mei_cl_device *device, return -ENOMEM; } - r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy); - if (r) { - pr_err(PN544_DRIVER_NAME ": event cb registration failed\n"); - goto err_out; - } - r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, &phy->hdev); - if (r < 0) - goto err_out; - - return 0; + if (r < 0) { + nfc_mei_phy_free(phy); -err_out: - nfc_mei_phy_free(phy); + return r; + } - return r; + return 0; } static int pn544_mei_remove(struct mei_cl_device *device) @@ -71,8 +63,6 @@ static int pn544_mei_remove(struct mei_cl_device *device) pn544_hci_remove(phy->hdev); - nfc_mei_phy_disable(phy); - nfc_mei_phy_free(phy); return 0; diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index f802e7c92356..2dacd19e1b8a 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val) */ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) { - if (mw > NTB_NUM_MW) + if (mw >= NTB_NUM_MW) return NULL; return ndev->mw[mw].vbase; @@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) */ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) { - if (mw > NTB_NUM_MW) + if (mw >= NTB_NUM_MW) return 0; return ndev->mw[mw].bar_sz; @@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) */ void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) { - if (mw > NTB_NUM_MW) + if (mw >= NTB_NUM_MW) return; dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, @@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ndev->mw[i].vbase = ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), 
ndev->mw[i].bar_sz); - dev_info(&pdev->dev, "MW %d size %d\n", i, - (u32) pci_resource_len(pdev, MW_TO_BAR(i))); + dev_info(&pdev->dev, "MW %d size %llu\n", i, + pci_resource_len(pdev, MW_TO_BAR(i))); if (!ndev->mw[i].vbase) { dev_warn(&pdev->dev, "Cannot remap BAR %d\n", MW_TO_BAR(i)); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index e0bdfd7f9930..f8d7081ee301 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -58,7 +58,7 @@ #include <linux/ntb.h> #include "ntb_hw.h" -#define NTB_TRANSPORT_VERSION 2 +#define NTB_TRANSPORT_VERSION 3 static unsigned int transport_mtu = 0x401E; module_param(transport_mtu, uint, 0644); @@ -173,10 +173,13 @@ struct ntb_payload_header { enum { VERSION = 0, - MW0_SZ, - MW1_SZ, - NUM_QPS, QP_LINKS, + NUM_QPS, + NUM_MWS, + MW0_SZ_HIGH, + MW0_SZ_LOW, + MW1_SZ_HIGH, + MW1_SZ_LOW, MAX_SPAD, }; @@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name) { struct ntb_transport_client_dev *client_dev; struct ntb_transport *nt; - int rc; + int rc, i = 0; if (list_empty(&ntb_transport_list)) return -ENODEV; @@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name) dev = &client_dev->dev; /* setup and register client devices */ - dev_set_name(dev, "%s", device_name); + dev_set_name(dev, "%s%d", device_name, i); dev->bus = &ntb_bus_type; dev->release = ntb_client_release; dev->parent = &ntb_query_pdev(nt->ndev)->dev; @@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name) } list_add_tail(&client_dev->entry, &nt->client_devs); + i++; } return 0; @@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, (qp_num / NTB_NUM_MW * rx_size); rx_size -= sizeof(struct ntb_rx_info); - qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); - qp->rx_max_frame = min(transport_mtu, rx_size); + qp->rx_buff = qp->remote_rx_info + 1; + /* Due to housekeeping, there must be atleast 2 buffs */ + qp->rx_max_frame = min(transport_mtu, rx_size / 2); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; - qp->remote_rx_info->entry = qp->rx_max_entry; + qp->remote_rx_info->entry = qp->rx_max_entry - 1; /* setup the hdr offsets with 0's */ for (i = 0; i < qp->rx_max_entry; i++) { @@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, qp->rx_pkts = 0; qp->tx_pkts = 0; + qp->tx_index = 0; +} + +static void ntb_free_mw(struct ntb_transport *nt, int num_mw) +{ + struct ntb_transport_mw *mw = &nt->mw[num_mw]; + struct pci_dev *pdev = ntb_query_pdev(nt->ndev); + + if (!mw->virt_addr) + return; + + dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr); + mw->virt_addr = NULL; } static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) @@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) struct ntb_transport_mw *mw = &nt->mw[num_mw]; struct pci_dev *pdev = ntb_query_pdev(nt->ndev); + /* No need to re-setup */ + if (mw->size == ALIGN(size, 4096)) + return 0; + + if (mw->size != 0) + ntb_free_mw(nt, num_mw); + /* Alloc memory for receiving data. 
Must be 4k aligned */ mw->size = ALIGN(size, 4096); mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, GFP_KERNEL); if (!mw->virt_addr) { + mw->size = 0; dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", (int) mw->size); return -ENOMEM; @@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work) u32 val; int rc, i; - /* send the local info */ - rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); - if (rc) { - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - 0, VERSION); - goto out; - } + /* send the local info, in the opposite order of the way we read it */ + for (i = 0; i < NTB_NUM_MW; i++) { + rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), + ntb_get_mw_size(ndev, i) >> 32); + if (rc) { + dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", + (u32)(ntb_get_mw_size(ndev, i) >> 32), + MW0_SZ_HIGH + (i * 2)); + goto out; + } - rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0)); - if (rc) { - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - (u32) ntb_get_mw_size(ndev, 0), MW0_SZ); - goto out; + rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2), + (u32) ntb_get_mw_size(ndev, i)); + if (rc) { + dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", + (u32) ntb_get_mw_size(ndev, i), + MW0_SZ_LOW + (i * 2)); + goto out; + } } - rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1)); + rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW); if (rc) { dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - (u32) ntb_get_mw_size(ndev, 1), MW1_SZ); + NTB_NUM_MW, NUM_MWS); goto out; } @@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work) goto out; } - rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS); - goto out; - } - - rc = ntb_write_remote_spad(ndev, QP_LINKS, val); + rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); if (rc) { dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - val, QP_LINKS); + NTB_TRANSPORT_VERSION, VERSION); goto out; } @@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work) goto out; dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); - rc = ntb_read_remote_spad(ndev, MW0_SZ, &val); + rc = ntb_read_remote_spad(ndev, NUM_MWS, &val); if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ); + dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS); goto out; } - if (!val) + if (val != NTB_NUM_MW) goto out; - dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val); + dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); - rc = ntb_set_mw(nt, 0, val); - if (rc) - goto out; + for (i = 0; i < NTB_NUM_MW; i++) { + u64 val64; - rc = ntb_read_remote_spad(ndev, MW1_SZ, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ); - goto out; - } + rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val); + if (rc) { + dev_err(&pdev->dev, "Error reading remote spad %d\n", + MW0_SZ_HIGH + (i * 2)); + goto out1; + } - if (!val) - goto out; - dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val); + val64 = (u64) val << 32; - rc = ntb_set_mw(nt, 1, val); - if (rc) - goto out; + rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val); + if (rc) { + dev_err(&pdev->dev, "Error reading remote spad %d\n", + MW0_SZ_LOW + (i * 2)); + goto out1; + } + + val64 |= val; + + dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64); + + 
rc = ntb_set_mw(nt, i, val64); + if (rc) + goto out1; + } nt->transport_link = NTB_LINK_UP; @@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work) return; +out1: + for (i = 0; i < NTB_NUM_MW; i++) + ntb_free_mw(nt, i); out: if (ntb_hw_link_status(ndev)) schedule_delayed_work(&nt->link_work, @@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt, (qp_num / NTB_NUM_MW * tx_size); tx_size -= sizeof(struct ntb_rx_info); - qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); - qp->tx_max_frame = min(transport_mtu, tx_size); + qp->tx_mw = qp->rx_info + 1; + /* Due to housekeeping, there must be atleast 2 buffs */ + qp->tx_max_frame = min(transport_mtu, tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; - qp->tx_index = 0; if (nt->debugfs_dir) { char debugfs_name[4]; @@ -897,10 +936,7 @@ void ntb_transport_free(void *transport) pdev = ntb_query_pdev(nt->ndev); for (i = 0; i < NTB_NUM_MW; i++) - if (nt->mw[i].virt_addr) - dma_free_coherent(&pdev->dev, nt->mw[i].size, - nt->mw[i].virt_addr, - nt->mw[i].dma_addr); + ntb_free_mw(nt, i); kfree(nt->qps); ntb_unregister_transport(nt->ndev); @@ -999,11 +1035,16 @@ out: static void ntb_transport_rx(unsigned long data) { struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; - int rc; + int rc, i; - do { + /* Limit the number of packets processed in a single interrupt to + * provide fairness to others + */ + for (i = 0; i < qp->rx_max_entry; i++) { rc = ntb_process_rxc(qp); - } while (!rc); + if (rc) + break; + } } static void ntb_transport_rxc_db(void *data, int db_num) @@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue); */ void ntb_transport_free_queue(struct ntb_transport_qp *qp) { - struct pci_dev *pdev = ntb_query_pdev(qp->ndev); + struct pci_dev *pdev; struct ntb_queue_entry *entry; if (!qp) return; + pdev = ntb_query_pdev(qp->ndev); + cancel_delayed_work_sync(&qp->link_work); ntb_unregister_db_callback(qp->ndev, qp->qp_num); @@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up); */ void ntb_transport_link_down(struct ntb_transport_qp *qp) { - struct pci_dev *pdev = ntb_query_pdev(qp->ndev); + struct pci_dev *pdev; int rc, val; if (!qp) return; + pdev = ntb_query_pdev(qp->ndev); qp->client_ready = NTB_LINK_DOWN; rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); @@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down); */ bool ntb_transport_link_query(struct ntb_transport_qp *qp) { + if (!qp) + return false; + return qp->qp_link == NTB_LINK_UP; } EXPORT_SYMBOL_GPL(ntb_transport_link_query); @@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query); */ unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) { + if (!qp) + return 0; + return qp->qp_num; } EXPORT_SYMBOL_GPL(ntb_transport_qp_num); @@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num); */ unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) { + if (!qp) + return 0; + return qp->tx_max_frame - sizeof(struct ntb_payload_header); } EXPORT_SYMBOL_GPL(ntb_transport_max_size); diff --git a/drivers/of/base.c b/drivers/of/base.c index c76d16c972cc..f53b992f060a 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1208,11 +1208,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np, out_args->args_count = count; for (i = 0; i < count; i++) out_args->args[i] = be32_to_cpup(list++); + } else { + of_node_put(node); } /* Found it! 
return success */ - if (node) - of_node_put(node); return 0; } diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 2ef7103270bb..1f05913ae677 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -668,7 +668,7 @@ lba_fixup_bus(struct pci_bus *bus) BUG(); } - if (ldev->hba.elmmio_space.start) { + if (ldev->hba.elmmio_space.flags) { err = request_resource(&iomem_resource, &(ldev->hba.elmmio_space)); if (err < 0) { @@ -993,7 +993,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) case PAT_LMMIO: /* used to fix up pre-initialized MEM BARs */ - if (!lba_dev->hba.lmmio_space.start) { + if (!lba_dev->hba.lmmio_space.flags) { sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO", (int)lba_dev->hba.bus_num.start); @@ -1001,7 +1001,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) io->start; r = &lba_dev->hba.lmmio_space; r->name = lba_dev->hba.lmmio_name; - } else if (!lba_dev->hba.elmmio_space.start) { + } else if (!lba_dev->hba.elmmio_space.flags) { sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO", (int)lba_dev->hba.bus_num.start); @@ -1096,6 +1096,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) r->name = "LBA PCI Busses"; r->start = lba_num & 0xff; r->end = (lba_num>>8) & 0xff; + r->flags = IORESOURCE_BUS; /* Set up local PCI Bus resources - we don't need them for ** Legacy boxes but it's nice to see in /proc/iomem. @@ -1494,7 +1495,7 @@ lba_driver_probe(struct parisc_device *dev) pci_add_resource_offset(&resources, &lba_dev->hba.io_space, HBA_PORT_BASE(lba_dev->hba.hba_num)); - if (lba_dev->hba.elmmio_space.start) + if (lba_dev->hba.elmmio_space.flags) pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space, lba_dev->hba.lmmio_space_offset); if (lba_dev->hba.lmmio_space.flags) diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index ac6e8e7a02df..a042d065a0c7 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -494,15 +494,4 @@ static struct pci_driver superio_driver = { .probe = superio_probe, }; -static int __init superio_modinit(void) -{ - return pci_register_driver(&superio_driver); -} - -static void __exit superio_exit(void) -{ - pci_unregister_driver(&superio_driver); -} - -module_init(superio_modinit); -module_exit(superio_exit); +module_pci_driver(superio_driver); diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 24e12d4d1769..a50576081b34 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -71,7 +71,7 @@ config PARPORT_PC_FIFO config PARPORT_PC_SUPERIO bool "SuperIO chipset support" - depends on PARPORT_PC + depends on PARPORT_PC && !PARISC help Saying Y here enables some probes for Super-IO chipsets in order to find out things like base addresses, IRQ lines and DMA channels. 
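The lba_pci.c changes above stop using resource.start as an "is this populated" test and check resource.flags instead (and lba_legacy_resources() now sets IORESOURCE_BUS explicitly): an unused struct resource has flags == 0, while a perfectly valid window may start at address 0. A tiny runnable sketch of the distinction, using a local struct that mirrors only the fields involved:

#include <stdio.h>

struct res {
        unsigned long start, end, flags;
};

#define RES_MEM 0x1     /* local stand-in for an IORESOURCE_* type bit */

static void claim_if_present(const struct res *r, const char *name)
{
        if (!r->flags) {                /* never filled in: skip it */
                printf("%s: absent\n", name);
                return;
        }
        printf("%s: [%#lx-%#lx]\n", name, r->start, r->end);
}

int main(void)
{
        struct res elmmio = { 0x0, 0xfff, RES_MEM };    /* starts at 0, still valid */
        struct res unused = { 0, 0, 0 };

        claim_if_present(&elmmio, "elmmio");
        claim_if_present(&unused, "unused");
        return 0;
}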
It diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index a5251cb5fb0c..6e3a60c78873 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -234,7 +234,7 @@ static int parport_PS2_supported(struct parport *pb) struct parport *parport_gsc_probe_port(unsigned long base, unsigned long base_hi, int irq, - int dma, struct pci_dev *dev) + int dma, struct parisc_device *padev) { struct parport_gsc_private *priv; struct parport_operations *ops; @@ -258,7 +258,6 @@ struct parport *parport_gsc_probe_port(unsigned long base, priv->ctr_writable = 0xff; priv->dma_buf = 0; priv->dma_handle = 0; - priv->dev = dev; p->base = base; p->base_hi = base_hi; p->irq = irq; @@ -282,6 +281,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, return NULL; } + p->dev = &padev->dev; p->base_hi = base_hi; p->modes = tmp.modes; p->size = (p->modes & PARPORT_MODE_EPP)?8:3; @@ -373,7 +373,7 @@ static int parport_init_chip(struct parisc_device *dev) } p = parport_gsc_probe_port(port, 0, dev->irq, - /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL); + /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev); if (p) parport_count++; dev_set_drvdata(&dev->dev, p); diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h index fc9c37c54022..812214768d27 100644 --- a/drivers/parport/parport_gsc.h +++ b/drivers/parport/parport_gsc.h @@ -217,6 +217,6 @@ extern void parport_gsc_dec_use_count(void); extern struct parport *parport_gsc_probe_port(unsigned long base, unsigned long base_hi, int irq, int dma, - struct pci_dev *dev); + struct parisc_device *padev); #endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */ diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 96fed19c6d90..716aa93fff76 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -950,6 +950,20 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK ; } +void acpiphp_check_host_bridge(acpi_handle handle) +{ + struct acpiphp_bridge *bridge; + + bridge = acpiphp_handle_to_bridge(handle); + if (bridge) { + acpiphp_check_bridge(bridge); + put_bridge(bridge); + } + + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL); +} + static void _handle_hotplug_event_bridge(struct work_struct *work) { struct acpiphp_bridge *bridge; diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 8ec8b4f48560..0f4554e48cc5 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -580,6 +580,7 @@ struct aer_recover_entry u8 devfn; u16 domain; int severity; + struct aer_capability_regs *regs; }; static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry, @@ -593,7 +594,7 @@ static DEFINE_SPINLOCK(aer_recover_ring_lock); static DECLARE_WORK(aer_recover_work, aer_recover_work_func); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, - int severity) + int severity, struct aer_capability_regs *aer_regs) { unsigned long flags; struct aer_recover_entry entry = { @@ -601,6 +602,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, .devfn = devfn, .domain = domain, .severity = severity, + .regs = aer_regs, }; spin_lock_irqsave(&aer_recover_ring_lock, flags); @@ -627,6 +629,7 @@ static void aer_recover_work_func(struct work_struct *work) PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn)); continue; } + cper_print_aer(pdev, entry.severity, entry.regs); do_recovery(pdev, 
entry.severity); pci_dev_put(pdev); } diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 5ab14251839d..2c7c9f5f592c 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c @@ -220,7 +220,7 @@ int cper_severity_to_aer(int cper_severity) } EXPORT_SYMBOL_GPL(cper_severity_to_aer); -void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity, +void cper_print_aer(struct pci_dev *dev, int cper_severity, struct aer_capability_regs *aer) { int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0; @@ -244,7 +244,7 @@ void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity, agent = AER_GET_AGENT(aer_severity, status); dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask); - cper_print_bits(prefix, status, status_strs, status_strs_size); + cper_print_bits("", status, status_strs, status_strs_size); dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", aer_error_layer[layer], aer_agent_string[agent]); if (aer_severity != AER_CORRECTABLE) diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index a3a851e49321..18c0d8d1ddf7 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL"); #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) -/* The RPX series use SLOT_B */ -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) -#define CONFIG_PCMCIA_SLOT_B -#define CONFIG_BD_IS_MHZ -#endif - /* The ADS board use SLOT_A */ #ifdef CONFIG_ADS #define CONFIG_PCMCIA_SLOT_A @@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev); #define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ -/* ------------------------------------------------------------------------- */ -/* board specific stuff: */ -/* voltage_set(), hardware_enable() and hardware_disable() */ -/* ------------------------------------------------------------------------- */ -/* RPX Boards from Embedded Planet */ - -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) - -/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks. - * SYPCR is write once only, therefore must the slowest memory be faster - * than the bus monitor or we will get a machine check due to the bus timeout. 
- */ - -#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE" - -#undef PCMCIA_BMT_LIMIT -#define PCMCIA_BMT_LIMIT (6*8) - -static int voltage_set(int slot, int vcc, int vpp) -{ - u32 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= BCSR1_PCVCTL4; - break; - case 50: - reg |= BCSR1_PCVCTL5; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= BCSR1_PCVCTL6; - else - return 1; - break; - case 120: - reg |= BCSR1_PCVCTL7; - default: - return 1; - } - - if (!((vcc == 50) || (vcc == 0))) - return 1; - - /* first, turn off all power */ - - out_be32(((u32 *) RPX_CSR_ADDR), - in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | - BCSR1_PCVCTL5 | - BCSR1_PCVCTL6 | - BCSR1_PCVCTL7)); - - /* enable new powersettings */ - - out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_RPXCLASSIC */ - /* FADS Boards from Motorola */ #if defined(CONFIG_FADS) @@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp) #endif -/* ------------------------------------------------------------------------- */ -/* Motorola MBX860 */ - -#if defined(CONFIG_MBX) - -#define PCMCIA_BOARD_MSG "MBX" - -static int voltage_set(int slot, int vcc, int vpp) -{ - u8 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= CSR2_VCC_33; - break; - case 50: - reg |= CSR2_VCC_50; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= CSR2_VPP_VCC; - else - return 1; - break; - case 120: - if ((vcc == 33) || (vcc == 50)) - reg |= CSR2_VPP_12; - else - return 1; - default: - return 1; - } - - /* first, turn off all power */ - out_8((u8 *) MBX_CSR2_ADDR, - in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK)); - - /* enable new powersettings */ - out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_MBX */ - #if defined(CONFIG_PRxK) #include <asm/cpld.h> extern volatile fpga_pc_regs *fpga_pc; diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c index c67c37e23dd7..694c3ace4520 100644 --- a/drivers/pinctrl/pinconf.c +++ b/drivers/pinctrl/pinconf.c @@ -610,7 +610,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d) bool found = false; unsigned long config; - mutex_lock(&pctldev->mutex); + mutex_lock(&pinctrl_maps_mutex); /* Parse the pinctrl map and look for the elected pin/state */ for_each_maps(maps_node, i, map) { @@ -659,7 +659,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d) confops->pin_config_config_dbg_show(pctldev, s, config); exit: - mutex_unlock(&pctldev->mutex); + mutex_unlock(&pinctrl_maps_mutex); return 0; } diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c index aa17f7580f61..6d4532702f80 100644 --- a/drivers/pinctrl/pinctrl-abx500.c +++ b/drivers/pinctrl/pinctrl-abx500.c @@ -851,23 +851,12 @@ static int abx500_gpio_probe(struct platform_device *pdev) if (abx500_pdata) pdata = abx500_pdata->gpio; - if (!pdata) { - if (np) { - const struct of_device_id *match; - match = of_match_device(abx500_gpio_match, 
&pdev->dev); - if (!match) - return -ENODEV; - id = (unsigned long)match->data; - } else { - dev_err(&pdev->dev, "gpio dt and platform data missing\n"); - return -ENODEV; - } + if (!(pdata || np)) { + dev_err(&pdev->dev, "gpio dt and platform data missing\n"); + return -ENODEV; } - if (platid) - id = platid->driver_data; - pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl), GFP_KERNEL); if (pct == NULL) { @@ -882,6 +871,16 @@ static int abx500_gpio_probe(struct platform_device *pdev) pct->chip.dev = &pdev->dev; pct->chip.base = (np) ? -1 : pdata->gpio_base; + if (platid) + id = platid->driver_data; + else if (np) { + const struct of_device_id *match; + + match = of_match_device(abx500_gpio_match, &pdev->dev); + if (match) + id = (unsigned long)match->data; + } + /* initialize the lock */ mutex_init(&pct->lock); @@ -900,8 +899,7 @@ static int abx500_gpio_probe(struct platform_device *pdev) abx500_pinctrl_ab8505_init(&pct->soc); break; default: - dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", - (int) platid->driver_data); + dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id); mutex_destroy(&pct->lock); return -EINVAL; } diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c index edde3acc4186..d6b41747d687 100644 --- a/drivers/pinctrl/pinctrl-coh901.c +++ b/drivers/pinctrl/pinctrl-coh901.c @@ -713,11 +713,6 @@ static int __init u300_gpio_probe(struct platform_device *pdev) gpio->dev = &pdev->dev; memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!memres) { - dev_err(gpio->dev, "could not get GPIO memory resource\n"); - return -ENODEV; - } - gpio->base = devm_ioremap_resource(&pdev->dev, memres); if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); @@ -835,7 +830,8 @@ static int __init u300_gpio_probe(struct platform_device *pdev) return 0; err_no_range: - err = gpiochip_remove(&gpio->chip); + if (gpiochip_remove(&gpio->chip)) + dev_err(&pdev->dev, "failed to remove gpio chip\n"); err_no_chip: err_no_domain: err_no_port: diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c index ac742817ebce..2d76f66a2e0b 100644 --- a/drivers/pinctrl/pinctrl-exynos.c +++ b/drivers/pinctrl/pinctrl-exynos.c @@ -196,6 +196,12 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data) return IRQ_HANDLED; } +struct exynos_eint_gpio_save { + u32 eint_con; + u32 eint_fltcon0; + u32 eint_fltcon1; +}; + /* * exynos_eint_gpio_init() - setup handling of external gpio interrupts. * @d: driver data of samsung pinctrl driver. 
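The coh901 hunk above (like the exynos5440, xway, and pwm probe hunks later in this series) drops the explicit "if (!res) return -ENODEV" test: devm_ioremap_resource() already rejects a NULL resource and returns an ERR_PTR, so a single IS_ERR() check covers both the missing-resource and the failed-remap cases. A kernel-style sketch of the resulting probe shape, using a hypothetical foo_probe():

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/err.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        /* devm_ioremap_resource() handles res == NULL (and busy regions)
         * itself and returns an ERR_PTR, so no separate NULL check. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... remaining device setup using 'base' ... */
        return 0;
}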
@@ -204,8 +210,8 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) { struct samsung_pin_bank *bank; struct device *dev = d->dev; - unsigned int ret; - unsigned int i; + int ret; + int i; if (!d->irq) { dev_err(dev, "irq number not available\n"); @@ -227,11 +233,29 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) bank->nr_pins, &exynos_gpio_irqd_ops, bank); if (!bank->irq_domain) { dev_err(dev, "gpio irq domain add failed\n"); - return -ENXIO; + ret = -ENXIO; + goto err_domains; + } + + bank->soc_priv = devm_kzalloc(d->dev, + sizeof(struct exynos_eint_gpio_save), GFP_KERNEL); + if (!bank->soc_priv) { + irq_domain_remove(bank->irq_domain); + ret = -ENOMEM; + goto err_domains; } } return 0; + +err_domains: + for (--i, --bank; i >= 0; --i, --bank) { + if (bank->eint_type != EINT_TYPE_GPIO) + continue; + irq_domain_remove(bank->irq_domain); + } + + return ret; } static void exynos_wkup_irq_unmask(struct irq_data *irqd) @@ -326,6 +350,28 @@ static int exynos_wkup_irq_set_type(struct irq_data *irqd, unsigned int type) return 0; } +static u32 exynos_eint_wake_mask = 0xffffffff; + +u32 exynos_get_eint_wake_mask(void) +{ + return exynos_eint_wake_mask; +} + +static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) +{ + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); + unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq); + + pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq); + + if (!on) + exynos_eint_wake_mask |= bit; + else + exynos_eint_wake_mask &= ~bit; + + return 0; +} + /* * irq_chip for wakeup interrupts */ @@ -335,6 +381,7 @@ static struct irq_chip exynos_wkup_irq_chip = { .irq_mask = exynos_wkup_irq_mask, .irq_ack = exynos_wkup_irq_ack, .irq_set_type = exynos_wkup_irq_set_type, + .irq_set_wake = exynos_wkup_irq_set_wake, }; /* interrupt handler for wakeup interrupts 0..15 */ @@ -505,6 +552,72 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return 0; } +static void exynos_pinctrl_suspend_bank( + struct samsung_pinctrl_drv_data *drvdata, + struct samsung_pin_bank *bank) +{ + struct exynos_eint_gpio_save *save = bank->soc_priv; + void __iomem *regs = drvdata->virt_base; + + save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset); + save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); + + pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); + pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); +} + +static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) +{ + struct samsung_pin_ctrl *ctrl = drvdata->ctrl; + struct samsung_pin_bank *bank = ctrl->pin_banks; + int i; + + for (i = 0; i < ctrl->nr_banks; ++i, ++bank) + if (bank->eint_type == EINT_TYPE_GPIO) + exynos_pinctrl_suspend_bank(drvdata, bank); +} + +static void exynos_pinctrl_resume_bank( + struct samsung_pinctrl_drv_data *drvdata, + struct samsung_pin_bank *bank) +{ + struct exynos_eint_gpio_save *save = bank->soc_priv; + void __iomem *regs = drvdata->virt_base; + + pr_debug("%s: con %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset), save->eint_con); + pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset), 
save->eint_fltcon0); + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4), save->eint_fltcon1); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset); + writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); +} + +static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) +{ + struct samsung_pin_ctrl *ctrl = drvdata->ctrl; + struct samsung_pin_bank *bank = ctrl->pin_banks; + int i; + + for (i = 0; i < ctrl->nr_banks; ++i, ++bank) + if (bank->eint_type == EINT_TYPE_GPIO) + exynos_pinctrl_resume_bank(drvdata, bank); +} + /* pin banks of exynos4210 pin-controller 0 */ static struct samsung_pin_bank exynos4210_pin_banks0[] = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -568,6 +681,8 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos4210-gpio-ctrl0", }, { /* pin-controller instance 1 data */ @@ -582,6 +697,8 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = { .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos4210-gpio-ctrl1", }, { /* pin-controller instance 2 data */ @@ -663,6 +780,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl0", }, { /* pin-controller instance 1 data */ @@ -677,6 +796,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl1", }, { /* pin-controller instance 2 data */ @@ -687,6 +808,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl2", }, { /* pin-controller instance 3 data */ @@ -697,6 +820,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl3", }, }; @@ -775,6 +900,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl0", }, { /* pin-controller instance 1 data */ @@ -785,6 +912,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl1", }, { /* pin-controller instance 2 data */ @@ -795,6 +924,8 @@ struct samsung_pin_ctrl 
exynos5250_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl2", }, { /* pin-controller instance 3 data */ @@ -805,6 +936,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, + .suspend = exynos_pinctrl_suspend, + .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl3", }, }; diff --git a/drivers/pinctrl/pinctrl-exynos.h b/drivers/pinctrl/pinctrl-exynos.h index 9b1f77a5bf0f..3c91c357792f 100644 --- a/drivers/pinctrl/pinctrl-exynos.h +++ b/drivers/pinctrl/pinctrl-exynos.h @@ -19,6 +19,7 @@ /* External GPIO and wakeup interrupt related definitions */ #define EXYNOS_GPIO_ECON_OFFSET 0x700 +#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800 #define EXYNOS_GPIO_EMASK_OFFSET 0x900 #define EXYNOS_GPIO_EPEND_OFFSET 0xA00 #define EXYNOS_WKUP_ECON_OFFSET 0xE00 diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c index 6038503ed929..32a48f44f574 100644 --- a/drivers/pinctrl/pinctrl-exynos5440.c +++ b/drivers/pinctrl/pinctrl-exynos5440.c @@ -1000,11 +1000,6 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "cannot find IO resource\n"); - return -ENOENT; - } - priv->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->reg_base)) return PTR_ERR(priv->reg_base); diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c index 615c5002b757..d22ca252b80d 100644 --- a/drivers/pinctrl/pinctrl-lantiq.c +++ b/drivers/pinctrl/pinctrl-lantiq.c @@ -52,7 +52,8 @@ static void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, int i; for (i = 0; i < num_maps; i++) - if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) + if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || + map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) kfree(map[i].data.configs.configs); kfree(map); } diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c index 976366899f68..63ac22e89678 100644 --- a/drivers/pinctrl/pinctrl-samsung.c +++ b/drivers/pinctrl/pinctrl-samsung.c @@ -28,6 +28,7 @@ #include <linux/gpio.h> #include <linux/irqdomain.h> #include <linux/spinlock.h> +#include <linux/syscore_ops.h> #include "core.h" #include "pinctrl-samsung.h" @@ -48,6 +49,9 @@ static struct pin_config { { "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN }, }; +/* Global list of devices (struct samsung_pinctrl_drv_data) */ +LIST_HEAD(drvdata_list); + static unsigned int pin_base; static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc) @@ -932,11 +936,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) drvdata->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "cannot find IO resource\n"); - return -ENOENT; - } - drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(drvdata->virt_base)) return PTR_ERR(drvdata->virt_base); @@ -961,9 +960,151 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) ctrl->eint_wkup_init(drvdata); platform_set_drvdata(pdev, drvdata); + + /* Add to the global list */ + list_add_tail(&drvdata->node, &drvdata_list); + return 0; } +#ifdef CONFIG_PM + +/** + * samsung_pinctrl_suspend_dev - save pinctrl state for suspend for a device + * + * Save data for all banks handled by 
this device. + */ +static void samsung_pinctrl_suspend_dev( + struct samsung_pinctrl_drv_data *drvdata) +{ + struct samsung_pin_ctrl *ctrl = drvdata->ctrl; + void __iomem *virt_base = drvdata->virt_base; + int i; + + for (i = 0; i < ctrl->nr_banks; i++) { + struct samsung_pin_bank *bank = &ctrl->pin_banks[i]; + void __iomem *reg = virt_base + bank->pctl_offset; + + u8 *offs = bank->type->reg_offset; + u8 *widths = bank->type->fld_width; + enum pincfg_type type; + + /* Registers without a powerdown config aren't lost */ + if (!widths[PINCFG_TYPE_CON_PDN]) + continue; + + for (type = 0; type < PINCFG_TYPE_NUM; type++) + if (widths[type]) + bank->pm_save[type] = readl(reg + offs[type]); + + if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) { + /* Some banks have two config registers */ + bank->pm_save[PINCFG_TYPE_NUM] = + readl(reg + offs[PINCFG_TYPE_FUNC] + 4); + pr_debug("Save %s @ %p (con %#010x %08x)\n", + bank->name, reg, + bank->pm_save[PINCFG_TYPE_FUNC], + bank->pm_save[PINCFG_TYPE_NUM]); + } else { + pr_debug("Save %s @ %p (con %#010x)\n", bank->name, + reg, bank->pm_save[PINCFG_TYPE_FUNC]); + } + } + + if (ctrl->suspend) + ctrl->suspend(drvdata); +} + +/** + * samsung_pinctrl_resume_dev - restore pinctrl state from suspend for a device + * + * Restore one of the banks that was saved during suspend. + * + * We don't bother doing anything complicated to avoid glitching lines since + * we're called before pad retention is turned off. + */ +static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata) +{ + struct samsung_pin_ctrl *ctrl = drvdata->ctrl; + void __iomem *virt_base = drvdata->virt_base; + int i; + + if (ctrl->resume) + ctrl->resume(drvdata); + + for (i = 0; i < ctrl->nr_banks; i++) { + struct samsung_pin_bank *bank = &ctrl->pin_banks[i]; + void __iomem *reg = virt_base + bank->pctl_offset; + + u8 *offs = bank->type->reg_offset; + u8 *widths = bank->type->fld_width; + enum pincfg_type type; + + /* Registers without a powerdown config aren't lost */ + if (!widths[PINCFG_TYPE_CON_PDN]) + continue; + + if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) { + /* Some banks have two config registers */ + pr_debug("%s @ %p (con %#010x %08x => %#010x %08x)\n", + bank->name, reg, + readl(reg + offs[PINCFG_TYPE_FUNC]), + readl(reg + offs[PINCFG_TYPE_FUNC] + 4), + bank->pm_save[PINCFG_TYPE_FUNC], + bank->pm_save[PINCFG_TYPE_NUM]); + writel(bank->pm_save[PINCFG_TYPE_NUM], + reg + offs[PINCFG_TYPE_FUNC] + 4); + } else { + pr_debug("%s @ %p (con %#010x => %#010x)\n", bank->name, + reg, readl(reg + offs[PINCFG_TYPE_FUNC]), + bank->pm_save[PINCFG_TYPE_FUNC]); + } + for (type = 0; type < PINCFG_TYPE_NUM; type++) + if (widths[type]) + writel(bank->pm_save[type], reg + offs[type]); + } +} + +/** + * samsung_pinctrl_suspend - save pinctrl state for suspend + * + * Save data for all banks across all devices. + */ +static int samsung_pinctrl_suspend(void) +{ + struct samsung_pinctrl_drv_data *drvdata; + + list_for_each_entry(drvdata, &drvdata_list, node) { + samsung_pinctrl_suspend_dev(drvdata); + } + + return 0; +} + +/** + * samsung_pinctrl_resume - restore pinctrl state for suspend + * + * Restore data for all banks across all devices. 
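The save path above stores one extra word for banks whose function field does not fit in a single 32-bit CON register, keyed on widths[PINCFG_TYPE_FUNC] * nr_pins > 32. The same arithmetic, pulled out as a tiny runnable check (the 4-bit field width below is just an illustrative value, not taken from the driver):

#include <stdio.h>

/* Number of 32-bit CON registers needed for a bank's function bits. */
static unsigned int con_regs_needed(unsigned int func_width_bits, unsigned int nr_pins)
{
        return (func_width_bits * nr_pins + 31) / 32;   /* round up */
}

int main(void)
{
        printf("4-bit FUNC,  8 pins -> %u CON register(s)\n", con_regs_needed(4, 8));
        printf("4-bit FUNC, 14 pins -> %u CON register(s)\n", con_regs_needed(4, 14));
        return 0;
}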
+ */ +static void samsung_pinctrl_resume(void) +{ + struct samsung_pinctrl_drv_data *drvdata; + + list_for_each_entry_reverse(drvdata, &drvdata_list, node) { + samsung_pinctrl_resume_dev(drvdata); + } +} + +#else +#define samsung_pinctrl_suspend NULL +#define samsung_pinctrl_resume NULL +#endif + +static struct syscore_ops samsung_pinctrl_syscore_ops = { + .suspend = samsung_pinctrl_suspend, + .resume = samsung_pinctrl_resume, +}; + static const struct of_device_id samsung_pinctrl_dt_match[] = { #ifdef CONFIG_PINCTRL_EXYNOS { .compatible = "samsung,exynos4210-pinctrl", @@ -992,6 +1133,14 @@ static struct platform_driver samsung_pinctrl_driver = { static int __init samsung_pinctrl_drv_register(void) { + /* + * Register syscore ops for save/restore of registers across suspend. + * It's important to ensure that this driver is running at an earlier + * initcall level than any arch-specific init calls that install syscore + * ops that turn off pad retention (like exynos_pm_resume). + */ + register_syscore_ops(&samsung_pinctrl_syscore_ops); + return platform_driver_register(&samsung_pinctrl_driver); } postcore_initcall(samsung_pinctrl_drv_register); diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h index 7c7f9ebcd05b..26d3519240c9 100644 --- a/drivers/pinctrl/pinctrl-samsung.h +++ b/drivers/pinctrl/pinctrl-samsung.h @@ -127,6 +127,7 @@ struct samsung_pin_bank_type { * @gpio_chip: GPIO chip of the bank. * @grange: linux gpio pin range supported by this bank. * @slock: spinlock protecting bank registers + * @pm_save: saved register values during suspend */ struct samsung_pin_bank { struct samsung_pin_bank_type *type; @@ -138,12 +139,15 @@ struct samsung_pin_bank { u32 eint_mask; u32 eint_offset; char *name; + void *soc_priv; struct device_node *of_node; struct samsung_pinctrl_drv_data *drvdata; struct irq_domain *irq_domain; struct gpio_chip gpio_chip; struct pinctrl_gpio_range grange; spinlock_t slock; + + u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/ }; /** @@ -184,11 +188,15 @@ struct samsung_pin_ctrl { int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); + void (*suspend)(struct samsung_pinctrl_drv_data *); + void (*resume)(struct samsung_pinctrl_drv_data *); + char *label; }; /** * struct samsung_pinctrl_drv_data: wrapper for holding driver data together. + * @node: global list node * @virt_base: register base address of the controller. * @dev: device instance representing the controller. * @irq: interrpt number used by the controller to notify gpio interrupts. @@ -201,6 +209,7 @@ struct samsung_pin_ctrl { * @nr_function: number of such pin functions. 
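The registration comment above leans on syscore ordering: resume callbacks run in the order the ops were registered (suspend runs in reverse), so registering from postcore_initcall keeps this driver's restore ahead of later-registered arch hooks that release pad retention. A kernel-style sketch of the registration boilerplate, with hypothetical save/restore bodies:

#include <linux/syscore_ops.h>
#include <linux/init.h>

static int my_syscore_suspend(void)
{
        /* save registers the SoC loses across suspend */
        return 0;
}

static void my_syscore_resume(void)
{
        /* restore them before later-registered syscore ops run */
}

static struct syscore_ops my_syscore_ops = {
        .suspend = my_syscore_suspend,
        .resume  = my_syscore_resume,
};

static int __init my_early_init(void)
{
        register_syscore_ops(&my_syscore_ops);
        return 0;
}
postcore_initcall(my_early_init);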
*/ struct samsung_pinctrl_drv_data { + struct list_head node; void __iomem *virt_base; struct device *dev; int irq; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 5f2d2bfd356e..b9fa04618601 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1166,7 +1166,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, (*map)->data.mux.function = np->name; if (pcs->is_pinconf) { - if (pcs_parse_pinconf(pcs, np, function, map)) + res = pcs_parse_pinconf(pcs, np, function, map); + if (res) goto free_pingroups; *num_maps = 2; } else { diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index c52fc2c08732..b7d8c890514c 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c @@ -1990,8 +1990,10 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) } clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); goto gpiochip_error; + } clk_prepare_enable(clk); @@ -2000,7 +2002,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) return 0; gpiochip_error: - ret = gpiochip_remove(pctl->chip); + if (gpiochip_remove(pctl->chip)) + dev_err(&pdev->dev, "failed to remove gpio chip\n"); pinctrl_error: pinctrl_unregister(pctl->pctl_dev); return ret; diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index f2977cff8366..e92132c76a6b 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -716,10 +716,6 @@ static int pinmux_xway_probe(struct platform_device *pdev) /* get and remap our register range */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Failed to get resource\n"); - return -ENOENT; - } xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(xway_info.membase[0])) return PTR_ERR(xway_info.membase[0]); diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c index 791a6719d8a9..8cd90e7e945a 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c @@ -2357,27 +2357,48 @@ static const unsigned int sdhi3_wp_mux[] = { }; /* - USB0 ------------------------------------------------------------------- */ static const unsigned int usb0_pins[] = { - /* OVC */ - 150, 154, + /* PENC */ + 154, }; static const unsigned int usb0_mux[] = { - USB_OVC0_MARK, USB_PENC0_MARK, + USB_PENC0_MARK, +}; +static const unsigned int usb0_ovc_pins[] = { + /* USB_OVC */ + 150 +}; +static const unsigned int usb0_ovc_mux[] = { + USB_OVC0_MARK, }; /* - USB1 ------------------------------------------------------------------- */ static const unsigned int usb1_pins[] = { - /* OVC */ - 152, 155, + /* PENC */ + 155, }; static const unsigned int usb1_mux[] = { - USB_OVC1_MARK, USB_PENC1_MARK, + USB_PENC1_MARK, +}; +static const unsigned int usb1_ovc_pins[] = { + /* USB_OVC */ + 152, +}; +static const unsigned int usb1_ovc_mux[] = { + USB_OVC1_MARK, }; /* - USB2 ------------------------------------------------------------------- */ static const unsigned int usb2_pins[] = { - /* OVC, PENC */ - 125, 156, + /* PENC */ + 156, }; static const unsigned int usb2_mux[] = { - USB_OVC2_MARK, USB_PENC2_MARK, + USB_PENC2_MARK, +}; +static const unsigned int usb2_ovc_pins[] = { + /* USB_OVC */ + 125, +}; +static const unsigned int usb2_ovc_mux[] = { + USB_OVC2_MARK, }; static const struct sh_pfc_pin_group pinmux_groups[] = { @@ -2501,8 +2522,11 @@ static const struct 
sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(sdhi3_cd), SH_PFC_PIN_GROUP(sdhi3_wp), SH_PFC_PIN_GROUP(usb0), + SH_PFC_PIN_GROUP(usb0_ovc), SH_PFC_PIN_GROUP(usb1), + SH_PFC_PIN_GROUP(usb1_ovc), SH_PFC_PIN_GROUP(usb2), + SH_PFC_PIN_GROUP(usb2_ovc), }; static const char * const du0_groups[] = { @@ -2683,14 +2707,17 @@ static const char * const sdhi3_groups[] = { static const char * const usb0_groups[] = { "usb0", + "usb0_ovc", }; static const char * const usb1_groups[] = { "usb1", + "usb1_ovc", }; static const char * const usb2_groups[] = { "usb2", + "usb2_ovc", }; static const struct sh_pfc_function pinmux_functions[] = { diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c index b964cc550568..de43262398db 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wm8750.c +++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c @@ -53,7 +53,7 @@ static const struct wmt_pinctrl_bank_registers wm8750_banks[] = { #define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6) #define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7) #define WMT_PIN_WAKEUP0 WMT_PIN(0, 16) -#define WMT_PIN_WAKEUP1 WMT_PIN(0, 16) +#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17) #define WMT_PIN_SD0CD WMT_PIN(0, 28) #define WMT_PIN_VDOUT0 WMT_PIN(1, 0) #define WMT_PIN_VDOUT1 WMT_PIN(1, 1) diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c index ab63104e8dc9..70d986e04afb 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -609,8 +609,7 @@ int wmt_pinctrl_probe(struct platform_device *pdev, return 0; fail_range: - err = gpiochip_remove(&data->gpio_chip); - if (err) + if (gpiochip_remove(&data->gpio_chip)) dev_err(&pdev->dev, "failed to remove gpio chip\n"); fail_gpio: pinctrl_unregister(data->pctl_dev); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 3338437b559b..85772616efbf 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -781,4 +781,12 @@ config APPLE_GMUX graphics as well as the backlight. Currently only backlight control is supported by the driver. +config PVPANIC + tristate "pvpanic device support" + depends on ACPI + ---help--- + This driver provides support for the pvpanic device. pvpanic is + a paravirtualized device provided by QEMU; it lets a virtual machine + (guest) communicate panic events to the host. + endif # X86_PLATFORM_DEVICES diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index ace2b38942fe..ef0ec746f78c 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o + +obj-$(CONFIG_PVPANIC) += pvpanic.o diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 210b5b872125..8fcb41e18b9c 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_x401u, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. 
X75A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X75A"), + }, + .driver_data = &quirk_asus_x401u, + }, {}, }; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index fa3ee6209572..1134119521ac 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm) { /* Final token is a terminator, so we don't want to copy it */ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; + struct calling_interface_token *new_da_tokens; struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); @@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm) da_command_address = table->cmdIOAddress; da_command_code = table->cmdIOCode; - da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * - sizeof(struct calling_interface_token), - GFP_KERNEL); + new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * + sizeof(struct calling_interface_token), + GFP_KERNEL); - if (!da_tokens) + if (!new_da_tokens) return; + da_tokens = new_da_tokens; memcpy(da_tokens+da_num_tokens, table->tokens, sizeof(struct calling_interface_token) * tokens); diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c index 3f945457f71c..bcf8cc6b5537 100644 --- a/drivers/platform/x86/dell-wmi-aio.c +++ b/drivers/platform/x86/dell-wmi-aio.c @@ -34,6 +34,14 @@ MODULE_LICENSE("GPL"); #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" +struct dell_wmi_event { + u16 length; + /* 0x000: A hot key pressed or an event occurred + * 0x00F: A sequence of hot keys are pressed */ + u16 type; + u16 event[]; +}; + static const char *dell_wmi_aio_guids[] = { EVENT_GUID1, EVENT_GUID2, @@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2); static const struct key_entry dell_wmi_aio_keymap[] = { { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe030, { KEY_VOLUMEUP } }, + { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe020, { KEY_MUTE } }, + { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } }, + { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } }, + { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } }, + { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } }, { KE_END, 0 } }; static struct input_dev *dell_wmi_aio_input_dev; +/* + * The new WMI event data format will follow the dell_wmi_event structure + * So, we will check if the buffer matches the format + */ +static bool dell_wmi_aio_event_check(u8 *buffer, int length) +{ + struct dell_wmi_event *event = (struct dell_wmi_event *)buffer; + + if (event == NULL || length < 6) + return false; + + if ((event->type == 0 || event->type == 0xf) && + event->length >= 2) + return true; + + return false; +} + static void dell_wmi_aio_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; + struct dell_wmi_event *event; acpi_status status; status = wmi_get_event_data(value, &response); @@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context) obj = (union acpi_object *)response.pointer; if (obj) { - unsigned int scancode; + unsigned int scancode = 0; switch (obj->type) { case ACPI_TYPE_INTEGER: @@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context) scancode, 1, true); break; case ACPI_TYPE_BUFFER: - /* Broken machines 
return the scancode in a buffer */ - if (obj->buffer.pointer && obj->buffer.length > 0) { - scancode = obj->buffer.pointer[0]; + if (dell_wmi_aio_event_check(obj->buffer.pointer, + obj->buffer.length)) { + event = (struct dell_wmi_event *) + obj->buffer.pointer; + scancode = event->event[0]; + } else { + /* Broken machines return the scancode in a + buffer */ + if (obj->buffer.pointer && + obj->buffer.length > 0) + scancode = obj->buffer.pointer[0]; + } + if (scancode) sparse_keymap_report_event( dell_wmi_aio_input_dev, scancode, 1, true); - } break; } } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 1a779bbfb87d..d111c8687f9b 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -71,6 +71,14 @@ enum hp_wmi_event_ids { HPWMI_WIRELESS = 5, HPWMI_CPU_BATTERY_THROTTLE = 6, HPWMI_LOCK_SWITCH = 7, + HPWMI_LID_SWITCH = 8, + HPWMI_SCREEN_ROTATION = 9, + HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A, + HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B, + HPWMI_PROXIMITY_SENSOR = 0x0C, + HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D, + HPWMI_PEAKSHIFT_PERIOD = 0x0F, + HPWMI_BATTERY_CHARGE_PERIOD = 0x10, }; struct bios_args { @@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_LOCK_SWITCH: break; + case HPWMI_LID_SWITCH: + break; + case HPWMI_SCREEN_ROTATION: + break; + case HPWMI_COOLSENSE_SYSTEM_MOBILE: + break; + case HPWMI_COOLSENSE_SYSTEM_HOT: + break; + case HPWMI_PROXIMITY_SENSOR: + break; + case HPWMI_BACKLIT_KB_BRIGHTNESS: + break; + case HPWMI_PEAKSHIFT_PERIOD: + break; + case HPWMI_BATTERY_CHARGE_PERIOD: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; @@ -679,7 +703,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device) } rfkill_init_sw_state(gps_rfkill, hp_wmi_get_sw_state(HPWMI_GPS)); - rfkill_set_hw_state(bluetooth_rfkill, + rfkill_set_hw_state(gps_rfkill, hp_wmi_get_hw_state(HPWMI_GPS)); err = rfkill_register(gps_rfkill); if (err) diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index e64a7a870d42..a8e43cf70fac 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev) static int lis3lv02d_resume(struct device *dev) { - return lis3lv02d_poweron(&lis3_dev); + lis3lv02d_poweron(&lis3_dev); + return 0; } static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 17f00b8dc5cb..89c4519d48ac 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) for (bit = 0; bit < 16; bit++) { if (test_bit(bit, &value)) { switch (bit) { - case 6: + case 0: /* Z580 */ + case 6: /* Z570 */ /* Thermal Management button */ ideapad_input_report(priv, 65); break; @@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) /* OneKey Theater button */ ideapad_input_report(priv, 64); break; + default: + pr_info("Unknown special button: %lu\n", bit); + break; } } } diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c new file mode 100644 index 000000000000..47ae0c47d4b5 --- /dev/null +++ b/drivers/platform/x86/pvpanic.c @@ -0,0 +1,124 @@ +/* + * pvpanic.c - pvpanic Device Support + * + * Copyright (C) 2013 Fujitsu. 
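A few hunks back, the dell-laptop parse_da_table() fix is the classic resize pitfall: assigning krealloc()'s return value straight to da_tokens loses (and leaks) the original buffer when the resize fails, because krealloc() leaves the old allocation untouched on failure. Plain realloc() behaves the same way; a short runnable illustration with a hypothetical token array (the struct only loosely mirrors calling_interface_token):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct token {
        unsigned short id, location, value;
};

static struct token *tokens;
static size_t num_tokens;

/* Grow the array by 'extra' entries without losing it on failure. */
static int grow_tokens(size_t extra)
{
        struct token *new_tokens;

        new_tokens = realloc(tokens, (num_tokens + extra) * sizeof(*tokens));
        if (!new_tokens)
                return -1;      /* 'tokens' is still valid and still ours */

        memset(new_tokens + num_tokens, 0, extra * sizeof(*new_tokens));
        tokens = new_tokens;
        num_tokens += extra;
        return 0;
}

int main(void)
{
        if (grow_tokens(4) || grow_tokens(4))
                return 1;
        printf("%zu tokens allocated\n", num_tokens);
        free(tokens);
        return 0;
}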
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>"); +MODULE_DESCRIPTION("pvpanic device driver"); +MODULE_LICENSE("GPL"); + +static int pvpanic_add(struct acpi_device *device); +static int pvpanic_remove(struct acpi_device *device); + +static const struct acpi_device_id pvpanic_device_ids[] = { + { "QEMU0001", 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); + +#define PVPANIC_PANICKED (1 << 0) + +static u16 port; + +static struct acpi_driver pvpanic_driver = { + .name = "pvpanic", + .class = "QEMU", + .ids = pvpanic_device_ids, + .ops = { + .add = pvpanic_add, + .remove = pvpanic_remove, + }, + .owner = THIS_MODULE, +}; + +static void +pvpanic_send_event(unsigned int event) +{ + outb(event, port); +} + +static int +pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + pvpanic_send_event(PVPANIC_PANICKED); + return NOTIFY_DONE; +} + +static struct notifier_block pvpanic_panic_nb = { + .notifier_call = pvpanic_panic_notify, +}; + + +static acpi_status +pvpanic_walk_resources(struct acpi_resource *res, void *context) +{ + switch (res->type) { + case ACPI_RESOURCE_TYPE_END_TAG: + return AE_OK; + + case ACPI_RESOURCE_TYPE_IO: + port = res->data.io.minimum; + return AE_OK; + + default: + return AE_ERROR; + } +} + +static int pvpanic_add(struct acpi_device *device) +{ + acpi_status status; + u64 ret; + + status = acpi_evaluate_integer(device->handle, "_STA", NULL, + &ret); + + if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B) + return -ENODEV; + + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + pvpanic_walk_resources, NULL); + + if (!port) + return -ENODEV; + + atomic_notifier_chain_register(&panic_notifier_list, + &pvpanic_panic_nb); + + return 0; +} + +static int pvpanic_remove(struct acpi_device *device) +{ + + atomic_notifier_chain_unregister(&panic_notifier_list, + &pvpanic_panic_nb); + return 0; +} + +module_acpi_driver(pvpanic_driver); diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c index 5f770059fd4d..1a90b62a71c6 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -176,10 +176,7 @@ static int __init samsungq10_init(void) samsungq10_probe, NULL, 0, NULL, 0); - if (IS_ERR(samsungq10_device)) - return PTR_ERR(samsungq10_device); - - return 0; + return PTR_RET(samsungq10_device); } static void __exit samsungq10_exit(void) diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index d544e3aaf761..2ac045f27f10 100644 --- a/drivers/platform/x86/sony-laptop.c +++ 
b/drivers/platform/x86/sony-laptop.c @@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) real_ev = __sony_nc_gfx_switch_status_get(); break; + case 0x015B: + /* Hybrid GFX switching SVS151290S */ + ev_type = GFX_SWITCH; + real_ev = __sony_nc_gfx_switch_status_get(); + break; default: dprintk("Unknown event 0x%x for handle 0x%x\n", event, handle); @@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device, break; case 0x0128: case 0x0146: + case 0x015B: result = sony_nc_gfx_switch_setup(pf_device, handle); if (result) pr_err("couldn't set up GFX Switch status (%d)\n", @@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device, case 0x0143: case 0x014b: case 0x014c: + case 0x0163: result = sony_nc_kbd_backlight_setup(pf_device, handle); if (result) pr_err("couldn't set up keyboard backlight function (%d)\n", @@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) break; case 0x0128: case 0x0146: + case 0x015B: sony_nc_gfx_switch_cleanup(pd); break; case 0x0131: @@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_cleanup(pd); break; default: @@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_resume(); break; default: @@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void) { unsigned int result; - if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result)) + if (sony_call_snc_handle(gfxs_ctl->handle, + gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100, + &result)) return -EIO; switch (gfxs_ctl->handle) { @@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void) */ return result & 0x1 ? SPEED : STAMINA; break; + case 0x015B: + /* 0: discrete GFX (speed) + * 1: integrated GFX (stamina) + */ + return result & 0x1 ? STAMINA : SPEED; + break; case 0x0128: /* it's a more elaborated bitmask, for now: * 2: integrated GFX (stamina) diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 0d0b5d7d19d0..7b8979c63f48 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -152,6 +152,7 @@ config BATTERY_SBS config BATTERY_BQ27x00 tristate "BQ27x00 battery driver" + depends on I2C || I2C=n help Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips. @@ -284,6 +285,7 @@ config CHARGER_LP8788 tristate "TI LP8788 charger driver" depends on MFD_LP8788 depends on LP8788_ADC + depends on IIO help Say Y to enable support for the LP8788 linear charger. 
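The new pvpanic driver added a little further up boils down to a panic notifier: on panic it writes PVPANIC_PANICKED to an I/O port the hypervisor watches. The notifier plumbing itself is generic; a kernel-style sketch of hooking panic_notifier_list (the ACPI port discovery and the outb() are the pvpanic-specific parts and are only stubbed in the comment):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int my_panic_notify(struct notifier_block *nb, unsigned long code,
                           void *unused)
{
        /* signal the event; pvpanic does outb(PVPANIC_PANICKED, port) here */
        return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
        .notifier_call = my_panic_notify,
};

static int __init my_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
        return 0;
}

static void __exit my_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list, &my_panic_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");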
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c index a44175139bbf..fef56e2041b3 100644 --- a/drivers/power/pm2301_charger.c +++ b/drivers/power/pm2301_charger.c @@ -1269,5 +1269,5 @@ module_exit(pm2xxx_charger_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay"); -MODULE_ALIAS("platform:pm2xxx-charger"); +MODULE_ALIAS("i2c:pm2xxx-charger"); MODULE_DESCRIPTION("PM2xxx charger management driver"); diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c index 58cbb009b74f..56fb509f4be0 100644 --- a/drivers/power/wm831x_backup.c +++ b/drivers/power/wm831x_backup.c @@ -207,7 +207,6 @@ static int wm831x_backup_remove(struct platform_device *pdev) struct wm831x_backup *devdata = platform_get_drvdata(pdev); power_supply_unregister(&devdata->backup); - kfree(devdata->backup.name); return 0; } diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index bea94510ad2d..71a2559278d7 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -628,9 +628,10 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) chip->caps = ptp_pch_caps; chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev); - - if (IS_ERR(chip->ptp_clock)) - return PTR_ERR(chip->ptp_clock); + if (IS_ERR(chip->ptp_clock)) { + ret = PTR_ERR(chip->ptp_clock); + goto err_ptp_clock_reg; + } spin_lock_init(&chip->register_lock); @@ -669,6 +670,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_req_irq: ptp_clock_unregister(chip->ptp_clock); +err_ptp_clock_reg: iounmap(chip->regs); chip->regs = NULL; diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index ec287989eafc..c938bae18812 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c @@ -265,11 +265,6 @@ static int imx_pwm_probe(struct platform_device *pdev) imx->chip.npwm = 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(imx->mmio_base)) return PTR_ERR(imx->mmio_base); diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c index d1eb499fb15d..ed6007b27585 100644 --- a/drivers/pwm/pwm-puv3.c +++ b/drivers/pwm/pwm-puv3.c @@ -117,11 +117,6 @@ static int pwm_probe(struct platform_device *pdev) return PTR_ERR(puv3->clk); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - puv3->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(puv3->base)) return PTR_ERR(puv3->base); diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index dee6ab552a0a..dc9717551d39 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -147,11 +147,6 @@ static int pwm_probe(struct platform_device *pdev) pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 
2 : 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pwm->mmio_base)) return PTR_ERR(pwm->mmio_base); diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 3d75f4a88f98..a5402933001f 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -181,11 +181,6 @@ static int tegra_pwm_probe(struct platform_device *pdev) pwm->dev = &pdev->dev; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resources defined\n"); - return -ENODEV; - } - pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pwm->mmio_base)) return PTR_ERR(pwm->mmio_base); diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index 0d65fb2e02c7..72ca42dfa733 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -240,11 +240,6 @@ static int ecap_pwm_probe(struct platform_device *pdev) pc->chip.npwm = 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pc->mmio_base)) return PTR_ERR(pc->mmio_base); diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 6a217596942f..48a485c2e422 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -471,11 +471,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev) pc->chip.npwm = NUM_PWM_CHANNEL; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pc->mmio_base)) return PTR_ERR(pc->mmio_base); diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c index c9c3d3a1e0eb..3b119bc2c3c6 100644 --- a/drivers/pwm/pwm-tipwmss.c +++ b/drivers/pwm/pwm-tipwmss.c @@ -70,11 +70,6 @@ static int pwmss_probe(struct platform_device *pdev) mutex_init(&info->pwmss_lock); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - info->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(info->mmio_base)) return PTR_ERR(info->mmio_base); diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index 69effd19afc7..323125abf3f4 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -230,11 +230,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev) } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - chip->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(chip->base)) return PTR_ERR(chip->base); diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 6194d35ebb97..5ab056494bbe 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -47,4 +47,24 @@ config RAPIDIO_DEBUG If you are unsure about this, say N here. +choice + prompt "Enumeration method" + depends on RAPIDIO + default RAPIDIO_ENUM_BASIC + help + There are different enumeration and discovery mechanisms offered + for RapidIO subsystem. You may select single built-in method or + or any number of methods to be built as modules. + Selecting a built-in method disables use of loadable methods. + + If unsure, select Basic built-in. 
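A little further up, the ptp_pch probe hunk adds an err_ptp_clock_reg label so that a ptp_clock_register() failure still unwinds the earlier iounmap() step instead of returning directly. The usual shape is one label per acquired resource, placed so each failure jumps to the cleanup for everything obtained before it; a compact runnable sketch with hypothetical acquire/release pairs:

#include <stdio.h>

static int step_a(void)  { printf("A acquired\n"); return 0; }
static void undo_a(void) { printf("A released\n"); }
static int step_b(void)  { printf("B acquired\n"); return 0; }
static void undo_b(void) { printf("B released\n"); }
static int step_c(void)  { printf("C failed\n"); return -1; }

static int probe(void)
{
        int ret;

        ret = step_a();
        if (ret)
                goto err_a;
        ret = step_b();
        if (ret)
                goto err_b;
        ret = step_c();
        if (ret)
                goto err_c;
        return 0;

err_c:                  /* undo everything acquired before C, newest first */
        undo_b();
err_b:
        undo_a();
err_a:
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}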
+ +config RAPIDIO_ENUM_BASIC + tristate "Basic" + help + This option includes basic RapidIO fabric enumeration and discovery + mechanism similar to one described in RapidIO specification Annex 1. + +endchoice + source "drivers/rapidio/switches/Kconfig" diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index ec3fb8121004..3036702ffe8b 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile @@ -1,7 +1,8 @@ # # Makefile for RapidIO interconnect services # -obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o +obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o +obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o obj-$(CONFIG_RAPIDIO) += switches/ obj-$(CONFIG_RAPIDIO) += devices/ diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 6faba406b6e9..a8b2c23a7ef4 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -471,6 +471,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) u32 intval; u32 ch_inte; + /* For MSI mode disable all device-level interrupts */ + if (priv->flags & TSI721_USING_MSI) + iowrite32(0, priv->regs + TSI721_DEV_INTE); + dev_int = ioread32(priv->regs + TSI721_DEV_INT); if (!dev_int) return IRQ_NONE; @@ -560,6 +564,14 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) } } #endif + + /* For MSI mode re-enable device-level interrupts */ + if (priv->flags & TSI721_USING_MSI) { + dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | + TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; + iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); + } + return IRQ_HANDLED; } diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 0f4a53bdaa3c..a0c875563d76 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c @@ -164,6 +164,13 @@ void rio_unregister_driver(struct rio_driver *rdrv) driver_unregister(&rdrv->driver); } +void rio_attach_device(struct rio_dev *rdev) +{ + rdev->dev.bus = &rio_bus_type; + rdev->dev.parent = &rio_bus; +} +EXPORT_SYMBOL_GPL(rio_attach_device); + /** * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure * @dev: the standard device structure to match against @@ -200,6 +207,7 @@ struct bus_type rio_bus_type = { .name = "rapidio", .match = rio_match_bus, .dev_attrs = rio_dev_attrs, + .bus_attrs = rio_bus_attrs, .probe = rio_device_probe, .remove = rio_device_remove, }; diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index a965acd3c0e4..4c15dbf81087 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -37,12 +37,8 @@ #include "rio.h" -LIST_HEAD(rio_devices); - static void rio_init_em(struct rio_dev *rdev); -DEFINE_SPINLOCK(rio_global_list_lock); - static int next_destid = 0; static int next_comptag = 1; @@ -327,127 +323,6 @@ static int rio_is_switch(struct rio_dev *rdev) } /** - * rio_switch_init - Sets switch operations for a particular vendor switch - * @rdev: RIO device - * @do_enum: Enumeration/Discovery mode flag - * - * Searches the RIO switch ops table for known switch types. If the vid - * and did match a switch table entry, then call switch initialization - * routine to setup switch-specific routines. 
- */ -static void rio_switch_init(struct rio_dev *rdev, int do_enum) -{ - struct rio_switch_ops *cur = __start_rio_switch_ops; - struct rio_switch_ops *end = __end_rio_switch_ops; - - while (cur < end) { - if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { - pr_debug("RIO: calling init routine for %s\n", - rio_name(rdev)); - cur->init_hook(rdev, do_enum); - break; - } - cur++; - } - - if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { - pr_debug("RIO: adding STD routing ops for %s\n", - rio_name(rdev)); - rdev->rswitch->add_entry = rio_std_route_add_entry; - rdev->rswitch->get_entry = rio_std_route_get_entry; - rdev->rswitch->clr_table = rio_std_route_clr_table; - } - - if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) - printk(KERN_ERR "RIO: missing routing ops for %s\n", - rio_name(rdev)); -} - -/** - * rio_add_device- Adds a RIO device to the device model - * @rdev: RIO device - * - * Adds the RIO device to the global device list and adds the RIO - * device to the RIO device list. Creates the generic sysfs nodes - * for an RIO device. - */ -static int rio_add_device(struct rio_dev *rdev) -{ - int err; - - err = device_add(&rdev->dev); - if (err) - return err; - - spin_lock(&rio_global_list_lock); - list_add_tail(&rdev->global_list, &rio_devices); - spin_unlock(&rio_global_list_lock); - - rio_create_sysfs_dev_files(rdev); - - return 0; -} - -/** - * rio_enable_rx_tx_port - enable input receiver and output transmitter of - * given port - * @port: Master port associated with the RIO network - * @local: local=1 select local port otherwise a far device is reached - * @destid: Destination ID of the device to check host bit - * @hopcount: Number of hops to reach the target - * @port_num: Port (-number on switch) to enable on a far end device - * - * Returns 0 or 1 from on General Control Command and Status Register - * (EXT_PTR+0x3C) - */ -inline int rio_enable_rx_tx_port(struct rio_mport *port, - int local, u16 destid, - u8 hopcount, u8 port_num) { -#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS - u32 regval; - u32 ext_ftr_ptr; - - /* - * enable rx input tx output port - */ - pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " - "%d, port_num = %d)\n", local, destid, hopcount, port_num); - - ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); - - if (local) { - rio_local_read_config_32(port, ext_ftr_ptr + - RIO_PORT_N_CTL_CSR(0), - ®val); - } else { - if (rio_mport_read_config_32(port, destid, hopcount, - ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) - return -EIO; - } - - if (regval & RIO_PORT_N_CTL_P_TYP_SER) { - /* serial */ - regval = regval | RIO_PORT_N_CTL_EN_RX_SER - | RIO_PORT_N_CTL_EN_TX_SER; - } else { - /* parallel */ - regval = regval | RIO_PORT_N_CTL_EN_RX_PAR - | RIO_PORT_N_CTL_EN_TX_PAR; - } - - if (local) { - rio_local_write_config_32(port, ext_ftr_ptr + - RIO_PORT_N_CTL_CSR(0), regval); - } else { - if (rio_mport_write_config_32(port, destid, hopcount, - ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) - return -EIO; - } -#endif - return 0; -} - -/** * rio_setup_device- Allocates and sets up a RIO device * @net: RIO network * @port: Master port to send transactions @@ -587,8 +462,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, rdev->destid); } - rdev->dev.bus = &rio_bus_type; - rdev->dev.parent = &rio_bus; + rio_attach_device(rdev); device_initialize(&rdev->dev); rdev->dev.release = rio_release_dev; @@ -1260,19 +1134,30 @@ static void rio_pw_enable(struct rio_mport *port, int enable) /** * 
rio_enum_mport- Start enumeration through a master port * @mport: Master port to send transactions + * @flags: Enumeration control flags * * Starts the enumeration process. If somebody has enumerated our * master port device, then give up. If not and we have an active * link, then start recursive peer enumeration. Returns %0 if * enumeration succeeds or %-EBUSY if enumeration fails. */ -int rio_enum_mport(struct rio_mport *mport) +int rio_enum_mport(struct rio_mport *mport, u32 flags) { struct rio_net *net = NULL; int rc = 0; printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id, mport->name); + + /* + * To avoid multiple start requests (repeat enumeration is not supported + * by this method) check if enumeration/discovery was performed for this + * mport: if mport was added into the list of mports for a net exit + * with error. + */ + if (mport->nnode.next || mport->nnode.prev) + return -EBUSY; + /* If somebody else enumerated our master port device, bail. */ if (rio_enum_host(mport) < 0) { printk(KERN_INFO @@ -1362,14 +1247,16 @@ static void rio_build_route_tables(struct rio_net *net) /** * rio_disc_mport- Start discovery through a master port * @mport: Master port to send transactions + * @flags: discovery control flags * * Starts the discovery process. If we have an active link, - * then wait for the signal that enumeration is complete. + * then wait for the signal that enumeration is complete (if wait + * is allowed). * When enumeration completion is signaled, start recursive * peer discovery. Returns %0 if discovery succeeds or %-EBUSY * on failure. */ -int rio_disc_mport(struct rio_mport *mport) +int rio_disc_mport(struct rio_mport *mport, u32 flags) { struct rio_net *net = NULL; unsigned long to_end; @@ -1379,6 +1266,11 @@ int rio_disc_mport(struct rio_mport *mport) /* If master port has an active link, allocate net and discover peers */ if (rio_mport_is_active(mport)) { + if (rio_enum_complete(mport)) + goto enum_done; + else if (flags & RIO_SCAN_ENUM_NO_WAIT) + return -EAGAIN; + pr_debug("RIO: wait for enumeration to complete...\n"); to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; @@ -1421,3 +1313,41 @@ enum_done: bail: return -EBUSY; } + +static struct rio_scan rio_scan_ops = { + .enumerate = rio_enum_mport, + .discover = rio_disc_mport, +}; + +static bool scan; +module_param(scan, bool, 0); +MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery " + "(default = 0)"); + +/** + * rio_basic_attach: + * + * When this enumeration/discovery method is loaded as a module this function + * registers its specific enumeration and discover routines for all available + * RapidIO mport devices. The "scan" command line parameter controls ability of + * the module to start RapidIO enumeration/discovery automatically. + * + * Returns 0 for success or -EIO if unable to register itself. + * + * This enumeration/discovery method cannot be unloaded and therefore does not + * provide a matching cleanup_module routine. 
+ */ + +static int __init rio_basic_attach(void) +{ + if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops)) + return -EIO; + if (scan) + rio_init_mports(); + return 0; +} + +late_initcall(rio_basic_attach); + +MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 4dbe360989be..66d4acd5e18f 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c @@ -285,3 +285,48 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev) rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); } } + +static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, + size_t count) +{ + long val; + struct rio_mport *port = NULL; + int rc; + + if (kstrtol(buf, 0, &val) < 0) + return -EINVAL; + + if (val == RIO_MPORT_ANY) { + rc = rio_init_mports(); + goto exit; + } + + if (val < 0 || val >= RIO_MAX_MPORTS) + return -EINVAL; + + port = rio_find_mport((int)val); + + if (!port) { + pr_debug("RIO: %s: mport_%d not available\n", + __func__, (int)val); + return -EINVAL; + } + + if (!port->nscan) + return -EINVAL; + + if (port->host_deviceid >= 0) + rc = port->nscan->enumerate(port, 0); + else + rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); +exit: + if (!rc) + rc = count; + + return rc; +} + +struct bus_attribute rio_bus_attrs[] = { + __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store), + __ATTR_NULL +}; diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index d553b5d13722..cb1c08996fbb 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -31,7 +31,11 @@ #include "rio.h" +static LIST_HEAD(rio_devices); +static DEFINE_SPINLOCK(rio_global_list_lock); + static LIST_HEAD(rio_mports); +static DEFINE_MUTEX(rio_mport_list_lock); static unsigned char next_portid; static DEFINE_SPINLOCK(rio_mmap_lock); @@ -53,6 +57,32 @@ u16 rio_local_get_device_id(struct rio_mport *port) } /** + * rio_add_device- Adds a RIO device to the device model + * @rdev: RIO device + * + * Adds the RIO device to the global device list and adds the RIO + * device to the RIO device list. Creates the generic sysfs nodes + * for an RIO device. + */ +int rio_add_device(struct rio_dev *rdev) +{ + int err; + + err = device_add(&rdev->dev); + if (err) + return err; + + spin_lock(&rio_global_list_lock); + list_add_tail(&rdev->global_list, &rio_devices); + spin_unlock(&rio_global_list_lock); + + rio_create_sysfs_dev_files(rdev); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_add_device); + +/** * rio_request_inb_mbox - request inbound mailbox service * @mport: RIO master port from which to allocate the mailbox resource * @dev_id: Device specific pointer to pass on event @@ -489,6 +519,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local, return ext_ftr_ptr; } +EXPORT_SYMBOL_GPL(rio_mport_get_physefb); /** * rio_get_comptag - Begin or continue searching for a RIO device by component tag @@ -521,6 +552,7 @@ exit: spin_unlock(&rio_global_list_lock); return rdev; } +EXPORT_SYMBOL_GPL(rio_get_comptag); /** * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. @@ -545,6 +577,107 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) regval); return 0; } +EXPORT_SYMBOL_GPL(rio_set_port_lockout); + +/** + * rio_switch_init - Sets switch operations for a particular vendor switch + * @rdev: RIO device + * @do_enum: Enumeration/Discovery mode flag + * + * Searches the RIO switch ops table for known switch types. 
If the vid + * and did match a switch table entry, then call switch initialization + * routine to setup switch-specific routines. + */ +void rio_switch_init(struct rio_dev *rdev, int do_enum) +{ + struct rio_switch_ops *cur = __start_rio_switch_ops; + struct rio_switch_ops *end = __end_rio_switch_ops; + + while (cur < end) { + if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { + pr_debug("RIO: calling init routine for %s\n", + rio_name(rdev)); + cur->init_hook(rdev, do_enum); + break; + } + cur++; + } + + if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { + pr_debug("RIO: adding STD routing ops for %s\n", + rio_name(rdev)); + rdev->rswitch->add_entry = rio_std_route_add_entry; + rdev->rswitch->get_entry = rio_std_route_get_entry; + rdev->rswitch->clr_table = rio_std_route_clr_table; + } + + if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) + printk(KERN_ERR "RIO: missing routing ops for %s\n", + rio_name(rdev)); +} +EXPORT_SYMBOL_GPL(rio_switch_init); + +/** + * rio_enable_rx_tx_port - enable input receiver and output transmitter of + * given port + * @port: Master port associated with the RIO network + * @local: local=1 select local port otherwise a far device is reached + * @destid: Destination ID of the device to check host bit + * @hopcount: Number of hops to reach the target + * @port_num: Port (-number on switch) to enable on a far end device + * + * Returns 0 or 1 from on General Control Command and Status Register + * (EXT_PTR+0x3C) + */ +int rio_enable_rx_tx_port(struct rio_mport *port, + int local, u16 destid, + u8 hopcount, u8 port_num) +{ +#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS + u32 regval; + u32 ext_ftr_ptr; + + /* + * enable rx input tx output port + */ + pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " + "%d, port_num = %d)\n", local, destid, hopcount, port_num); + + ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); + + if (local) { + rio_local_read_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), + ®val); + } else { + if (rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) + return -EIO; + } + + if (regval & RIO_PORT_N_CTL_P_TYP_SER) { + /* serial */ + regval = regval | RIO_PORT_N_CTL_EN_RX_SER + | RIO_PORT_N_CTL_EN_TX_SER; + } else { + /* parallel */ + regval = regval | RIO_PORT_N_CTL_EN_RX_PAR + | RIO_PORT_N_CTL_EN_TX_PAR; + } + + if (local) { + rio_local_write_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), regval); + } else { + if (rio_mport_write_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) + return -EIO; + } +#endif + return 0; +} +EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port); + /** * rio_chk_dev_route - Validate route to the specified device. @@ -610,6 +743,7 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount) return 0; } +EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access); /** * rio_chk_dev_access - Validate access to the specified device. 
@@ -941,6 +1075,7 @@ rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, return RIO_GET_BLOCK_ID(reg_val); } } +EXPORT_SYMBOL_GPL(rio_mport_get_efb); /** * rio_mport_get_feature - query for devices' extended features @@ -997,6 +1132,7 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid, return 0; } +EXPORT_SYMBOL_GPL(rio_mport_get_feature); /** * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did @@ -1246,6 +1382,95 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ +/** + * rio_find_mport - find RIO mport by its ID + * @mport_id: number (ID) of mport device + * + * Given a RIO mport number, the desired mport is located + * in the global list of mports. If the mport is found, a pointer to its + * data structure is returned. If no mport is found, %NULL is returned. + */ +struct rio_mport *rio_find_mport(int mport_id) +{ + struct rio_mport *port; + + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id) + goto found; + } + port = NULL; +found: + mutex_unlock(&rio_mport_list_lock); + + return port; +} + +/** + * rio_register_scan - enumeration/discovery method registration interface + * @mport_id: mport device ID for which fabric scan routine has to be set + * (RIO_MPORT_ANY = set for all available mports) + * @scan_ops: enumeration/discovery control structure + * + * Assigns an enumeration or discovery method to the specified mport device (or all + * available mports if RIO_MPORT_ANY is specified). + * Returns an error if the mport already has an enumerator attached to it. + * In case of RIO_MPORT_ANY, ignores ports with valid scan routines and returns + * an error if it was unable to find at least one available mport. + */ +int rio_register_scan(int mport_id, struct rio_scan *scan_ops) +{ + struct rio_mport *port; + int rc = -EBUSY; + + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { + if (port->nscan && mport_id == RIO_MPORT_ANY) + continue; + else if (port->nscan) + break; + + port->nscan = scan_ops; + rc = 0; + + if (mport_id != RIO_MPORT_ANY) + break; + } + } + mutex_unlock(&rio_mport_list_lock); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_register_scan); + +/** + * rio_unregister_scan - removes enumeration/discovery method from mport + * @mport_id: mport device ID for which fabric scan routine has to be + * unregistered (RIO_MPORT_ANY = set for all available mports) + * + * Removes the enumeration or discovery method assigned to the specified mport + * device (or all available mports if RIO_MPORT_ANY is specified). 
+ */ +int rio_unregister_scan(int mport_id) +{ + struct rio_mport *port; + + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { + if (port->nscan) + port->nscan = NULL; + if (mport_id != RIO_MPORT_ANY) + break; + } + } + mutex_unlock(&rio_mport_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_unregister_scan); + static void rio_fixup_device(struct rio_dev *dev) { } @@ -1274,7 +1499,7 @@ static void disc_work_handler(struct work_struct *_work) work = container_of(_work, struct rio_disc_work, work); pr_debug("RIO: discovery work for mport %d %s\n", work->mport->id, work->mport->name); - rio_disc_mport(work->mport); + work->mport->nscan->discover(work->mport, 0); } int rio_init_mports(void) @@ -1290,12 +1515,15 @@ int rio_init_mports(void) * First, run enumerations and check if we need to perform discovery * on any of the registered mports. */ + mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) { - if (port->host_deviceid >= 0) - rio_enum_mport(port); - else + if (port->host_deviceid >= 0) { + if (port->nscan) + port->nscan->enumerate(port, 0); + } else n++; } + mutex_unlock(&rio_mport_list_lock); if (!n) goto no_disc; @@ -1322,14 +1550,16 @@ int rio_init_mports(void) } n = 0; + mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) { - if (port->host_deviceid < 0) { + if (port->host_deviceid < 0 && port->nscan) { work[n].mport = port; INIT_WORK(&work[n].work, disc_work_handler); queue_work(rio_wq, &work[n].work); n++; } } + mutex_unlock(&rio_mport_list_lock); flush_workqueue(rio_wq); pr_debug("RIO: destroy discovery workqueue\n"); @@ -1342,8 +1572,6 @@ no_disc: return 0; } -device_initcall_sync(rio_init_mports); - static int hdids[RIO_MAX_MPORTS + 1]; static int rio_get_hdid(int index) @@ -1371,7 +1599,10 @@ int rio_register_mport(struct rio_mport *port) port->id = next_portid++; port->host_deviceid = rio_get_hdid(port->id); + port->nscan = NULL; + mutex_lock(&rio_mport_list_lock); list_add_tail(&port->node, &rio_mports); + mutex_unlock(&rio_mport_list_lock); return 0; } @@ -1386,3 +1617,4 @@ EXPORT_SYMBOL_GPL(rio_request_inb_mbox); EXPORT_SYMBOL_GPL(rio_release_inb_mbox); EXPORT_SYMBOL_GPL(rio_request_outb_mbox); EXPORT_SYMBOL_GPL(rio_release_outb_mbox); +EXPORT_SYMBOL_GPL(rio_init_mports); diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index b1af414f15e6..c14f864dea5c 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -15,6 +15,7 @@ #include <linux/rio.h> #define RIO_MAX_CHK_RETRY 3 +#define RIO_MPORT_ANY (-1) /* Functions internal to the RIO core code */ @@ -27,8 +28,6 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount); extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); -extern int rio_enum_mport(struct rio_mport *mport); -extern int rio_disc_mport(struct rio_mport *mport); extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port); @@ -39,10 +38,18 @@ extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table); extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); +extern int rio_add_device(struct rio_dev *rdev); +extern void rio_switch_init(struct rio_dev *rdev, int do_enum); +extern int 
rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u8 port_num); +extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); +extern int rio_unregister_scan(int mport_id); +extern void rio_attach_device(struct rio_dev *rdev); +extern struct rio_mport *rio_find_mport(int mport_id); /* Structures internal to the RIO core code */ extern struct device_attribute rio_dev_attrs[]; -extern spinlock_t rio_global_list_lock; +extern struct bus_attribute rio_bus_attrs[]; extern struct rio_switch_ops __start_rio_switch_ops[]; extern struct rio_switch_ops __end_rio_switch_ops[]; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 6e5017841582..815d6df8bd5f 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1539,7 +1539,10 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) } /** - * Balance enable_count of each GPIO and actual GPIO pin control. + * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control + * @rdev: regulator_dev structure + * @enable: enable GPIO at initial use? + * * GPIO is enabled in case of initial use. (enable_count is 0) * GPIO is disabled when it is not shared any more. (enable_count <= 1) */ @@ -2702,7 +2705,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage); /** * regulator_set_current_limit - set regulator output current limit * @regulator: regulator source - * @min_uA: Minimuum supported current in uA + * @min_uA: Minimum supported current in uA * @max_uA: Maximum supported current in uA * * Sets current sink to the desired output current. This can be set during diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c index 89bd2faaef8c..ce89f7848a57 100644 --- a/drivers/regulator/dbx500-prcmu.c +++ b/drivers/regulator/dbx500-prcmu.c @@ -24,18 +24,6 @@ static int power_state_active_cnt; /* will initialize to zero */ static DEFINE_SPINLOCK(power_state_active_lock); -int power_state_active_get(void) -{ - unsigned long flags; - int cnt; - - spin_lock_irqsave(&power_state_active_lock, flags); - cnt = power_state_active_cnt; - spin_unlock_irqrestore(&power_state_active_lock, flags); - - return cnt; -} - void power_state_active_enable(void) { unsigned long flags; @@ -65,6 +53,18 @@ out: #ifdef CONFIG_REGULATOR_DEBUG +static int power_state_active_get(void) +{ + unsigned long flags; + int cnt; + + spin_lock_irqsave(&power_state_active_lock, flags); + cnt = power_state_active_cnt; + spin_unlock_irqrestore(&power_state_active_lock, flags); + + return cnt; +} + static struct ux500_regulator_debug { struct dentry *dir; struct dentry *status_file; diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 92ceed0fc65e..3ae44ac12a94 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -840,7 +840,7 @@ static int palmas_regulators_probe(struct platform_device *pdev) break; } - if ((id == PALMAS_REG_SMPS6) && (id == PALMAS_REG_SMPS8)) + if ((id == PALMAS_REG_SMPS6) || (id == PALMAS_REG_SMPS8)) ramp_delay_support = true; if (ramp_delay_support) { @@ -878,7 +878,7 @@ static int palmas_regulators_probe(struct platform_device *pdev) pmic->desc[id].vsel_mask = SMPS10_VSEL; pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, - PALMAS_SMPS10_STATUS); + PALMAS_SMPS10_CTRL); pmic->desc[id].enable_mask = SMPS10_BOOST_EN; pmic->desc[id].min_uV = 3750000; pmic->desc[id].uV_step = 1250000; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 
0c81915b1997..b9838130a7b0 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -20,7 +20,6 @@ if RTC_CLASS config RTC_HCTOSYS bool "Set system time from RTC on startup and resume" default y - depends on !ALWAYS_USE_PERSISTENT_CLOCK help If you say yes here, the system time (wall clock) will be set using the value read from a specified RTC device. This is useful to avoid @@ -29,7 +28,6 @@ config RTC_SYSTOHC bool "Set the RTC time based on NTP synchronization" default y - depends on !ALWAYS_USE_PERSISTENT_CLOCK help If you say yes here, the system time (wall clock) will be stored in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 0eab77b22340..f296f3f7db9b 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -25,6 +25,7 @@ #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/interrupt.h> +#include <linux/spinlock.h> #include <linux/ioctl.h> #include <linux/completion.h> #include <linux/io.h> @@ -42,10 +43,65 @@ #define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ +struct at91_rtc_config { + bool use_shadow_imr; +}; + +static const struct at91_rtc_config *at91_rtc_config; static DECLARE_COMPLETION(at91_rtc_updated); static unsigned int at91_alarm_year = AT91_RTC_EPOCH; static void __iomem *at91_rtc_regs; static int irq; +static DEFINE_SPINLOCK(at91_rtc_lock); +static u32 at91_rtc_shadow_imr; + +static void at91_rtc_write_ier(u32 mask) +{ + unsigned long flags; + + spin_lock_irqsave(&at91_rtc_lock, flags); + at91_rtc_shadow_imr |= mask; + at91_rtc_write(AT91_RTC_IER, mask); + spin_unlock_irqrestore(&at91_rtc_lock, flags); +} + +static void at91_rtc_write_idr(u32 mask) +{ + unsigned long flags; + + spin_lock_irqsave(&at91_rtc_lock, flags); + at91_rtc_write(AT91_RTC_IDR, mask); + /* + * Register read back (of any RTC-register) needed to make sure + * IDR-register write has reached the peripheral before updating + * shadow mask. + * + * Note that there is still a possibility that the mask is updated + * before interrupts have actually been disabled in hardware. The only + * way to be certain would be to poll the IMR-register, which is + * the very register we are trying to emulate. The register read back + * is a reasonable heuristic. 
+ */ + at91_rtc_read(AT91_RTC_SR); + at91_rtc_shadow_imr &= ~mask; + spin_unlock_irqrestore(&at91_rtc_lock, flags); +} + +static u32 at91_rtc_read_imr(void) +{ + unsigned long flags; + u32 mask; + + if (at91_rtc_config->use_shadow_imr) { + spin_lock_irqsave(&at91_rtc_lock, flags); + mask = at91_rtc_shadow_imr; + spin_unlock_irqrestore(&at91_rtc_lock, flags); + } else { + mask = at91_rtc_read(AT91_RTC_IMR); + } + + return mask; +} /* * Decode time/date into rtc_time structure @@ -110,9 +166,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) cr = at91_rtc_read(AT91_RTC_CR); at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); - at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); + at91_rtc_write_ier(AT91_RTC_ACKUPD); wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ - at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); + at91_rtc_write_idr(AT91_RTC_ACKUPD); at91_rtc_write(AT91_RTC_TIMR, bin2bcd(tm->tm_sec) << 0 @@ -144,7 +200,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_year = at91_alarm_year - 1900; - alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) + alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM) ? 1 : 0; dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, @@ -169,7 +225,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) tm.tm_min = alrm->time.tm_min; tm.tm_sec = alrm->time.tm_sec; - at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); + at91_rtc_write_idr(AT91_RTC_ALARM); at91_rtc_write(AT91_RTC_TIMALR, bin2bcd(tm.tm_sec) << 0 | bin2bcd(tm.tm_min) << 8 @@ -182,7 +238,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) if (alrm->enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); - at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); + at91_rtc_write_ier(AT91_RTC_ALARM); } dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, @@ -198,9 +254,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) if (enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); - at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); + at91_rtc_write_ier(AT91_RTC_ALARM); } else - at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); + at91_rtc_write_idr(AT91_RTC_ALARM); return 0; } @@ -209,7 +265,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) */ static int at91_rtc_proc(struct device *dev, struct seq_file *seq) { - unsigned long imr = at91_rtc_read(AT91_RTC_IMR); + unsigned long imr = at91_rtc_read_imr(); seq_printf(seq, "update_IRQ\t: %s\n", (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); @@ -229,7 +285,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) unsigned int rtsr; unsigned long events = 0; - rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); + rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); if (rtsr) { /* this interrupt is shared! Is it ours? 
*/ if (rtsr & AT91_RTC_ALARM) events |= (RTC_AF | RTC_IRQF); @@ -250,6 +306,43 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) return IRQ_NONE; /* not handled */ } +static const struct at91_rtc_config at91rm9200_config = { +}; + +static const struct at91_rtc_config at91sam9x5_config = { + .use_shadow_imr = true, +}; + +#ifdef CONFIG_OF +static const struct of_device_id at91_rtc_dt_ids[] = { + { + .compatible = "atmel,at91rm9200-rtc", + .data = &at91rm9200_config, + }, { + .compatible = "atmel,at91sam9x5-rtc", + .data = &at91sam9x5_config, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids); +#endif + +static const struct at91_rtc_config * +at91_rtc_get_config(struct platform_device *pdev) +{ + const struct of_device_id *match; + + if (pdev->dev.of_node) { + match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node); + if (!match) + return NULL; + return (const struct at91_rtc_config *)match->data; + } + + return &at91rm9200_config; +} + static const struct rtc_class_ops at91_rtc_ops = { .read_time = at91_rtc_readtime, .set_time = at91_rtc_settime, @@ -268,6 +361,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev) struct resource *regs; int ret = 0; + at91_rtc_config = at91_rtc_get_config(pdev); + if (!at91_rtc_config) + return -ENODEV; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_err(&pdev->dev, "no mmio resource defined\n"); @@ -290,7 +387,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev) at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */ /* Disable all interrupts */ - at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | + at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); @@ -335,7 +432,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) struct rtc_device *rtc = platform_get_drvdata(pdev); /* Disable all interrupts */ - at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | + at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); free_irq(irq, pdev); @@ -358,13 +455,13 @@ static int at91_rtc_suspend(struct device *dev) /* this IRQ is shared with DBGU and other hardware which isn't * necessarily doing PM like we are... 
*/ - at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) + at91_rtc_imr = at91_rtc_read_imr() & (AT91_RTC_ALARM|AT91_RTC_SECEV); if (at91_rtc_imr) { if (device_may_wakeup(dev)) enable_irq_wake(irq); else - at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); + at91_rtc_write_idr(at91_rtc_imr); } return 0; } @@ -375,7 +472,7 @@ static int at91_rtc_resume(struct device *dev) if (device_may_wakeup(dev)) disable_irq_wake(irq); else - at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); + at91_rtc_write_ier(at91_rtc_imr); } return 0; } @@ -383,12 +480,6 @@ static int at91_rtc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); -static const struct of_device_id at91_rtc_dt_ids[] = { - { .compatible = "atmel,at91rm9200-rtc" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids); - static struct platform_driver at91_rtc_driver = { .remove = __exit_p(at91_rtc_remove), .driver = { diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index cc5bea9c4b1c..f1cb706445c7 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -854,6 +854,9 @@ static int cmos_resume(struct device *dev) } spin_lock_irq(&rtc_lock); + if (device_may_wakeup(dev)) + hpet_rtc_timer_init(); + do { CMOS_WRITE(tmp, RTC_CONTROL); hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK); @@ -869,7 +872,6 @@ static int cmos_resume(struct device *dev) rtc_update_irq(cmos->rtc, 1, mask); tmp &= ~RTC_AIE; hpet_mask_rtc_irq_bit(RTC_AIE); - hpet_rtc_timer_init(); } while (mask & RTC_AIE); spin_unlock_irq(&rtc_lock); } diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index 48b6612fae7f..d5af7baa48b5 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c @@ -285,7 +285,7 @@ static int max8998_rtc_probe(struct platform_device *pdev) info->irq, ret); dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name); - if (pdata->rtc_delay) { + if (pdata && pdata->rtc_delay) { info->lp3974_bug_workaround = true; dev_warn(&pdev->dev, "LP3974 with RTC REGERR option." 
" RTC updates will be extremely slow.\n"); diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index f5dfb6e5e7d9..d592e2fe43f7 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c @@ -234,11 +234,6 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev) return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "platform_get_resource failed\n"); - return -ENXIO; - } - nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(nuc900_rtc->rtc_reg)) return PTR_ERR(nuc900_rtc->rtc_reg); diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 4e1bdb832e37..b0ba3fc991ea 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -347,11 +347,6 @@ static int __init omap_rtc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - pr_debug("%s: RTC resource data missing\n", pdev->name); - return -ENOENT; - } - rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(rtc_base)) return PTR_ERR(rtc_base); diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 8900ea784817..0f0609b1aa2c 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -306,7 +306,7 @@ static int pl031_remove(struct amba_device *adev) struct pl031_local *ldata = dev_get_drvdata(&adev->dev); amba_set_drvdata(adev, NULL); - free_irq(adev->irq[0], ldata->rtc); + free_irq(adev->irq[0], ldata); rtc_device_unregister(ldata->rtc); iounmap(ldata->base); kfree(ldata); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 14040b22888d..0b495e8b8e66 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -477,11 +477,6 @@ static int s3c_rtc_probe(struct platform_device *pdev) /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "failed to get memory region resource\n"); - return -ENOENT; - } - s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(s3c_rtc_base)) return PTR_ERR(s3c_rtc_base); diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c index a34315d25478..76af92ad5a8a 100644 --- a/drivers/rtc/rtc-tegra.c +++ b/drivers/rtc/rtc-tegra.c @@ -322,12 +322,6 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, - "Unable to allocate resources for device.\n"); - return -EBUSY; - } - info->rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(info->rtc_base)) return PTR_ERR(info->rtc_base); diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c index 459c2ffc95a6..426901cef14f 100644 --- a/drivers/rtc/rtc-tps6586x.c +++ b/drivers/rtc/rtc-tps6586x.c @@ -273,6 +273,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) return ret; } + device_init_wakeup(&pdev->dev, 1); + platform_set_drvdata(pdev, rtc); rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), &tps6586x_rtc_ops, THIS_MODULE); @@ -292,7 +294,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) goto fail_rtc_register; } disable_irq(rtc->irq); - device_set_wakeup_capable(&pdev->dev, 1); return 0; fail_rtc_register: diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 8751a5240c99..b2eab34f38d9 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, rtc); + 
device_init_wakeup(&pdev->dev, 1); return 0; out2: diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 4361d9772c42..d72a9216ee2e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3440,8 +3440,16 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) device->path_data.opm &= ~eventlpm; device->path_data.ppm &= ~eventlpm; device->path_data.npm &= ~eventlpm; - if (oldopm && !device->path_data.opm) - dasd_generic_last_path_gone(device); + if (oldopm && !device->path_data.opm) { + dev_warn(&device->cdev->dev, + "No verified channel paths remain " + "for the device\n"); + DBF_DEV_EVENT(DBF_WARNING, device, + "%s", "last verified path gone"); + dasd_eer_write(device, NULL, DASD_EER_NOPATH); + dasd_device_set_stop_bits(device, + DASD_STOPPED_DC_WAIT); + } } if (path_event[chp] & PE_PATH_AVAILABLE) { device->path_data.opm &= ~eventlpm; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 690c3338a8ae..464dd29d06c0 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -343,6 +343,7 @@ static int __init xpram_setup_blkdev(void) put_disk(xpram_disks[i]); goto out; } + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]); blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 21fabc6d5a9c..6c440d4349d4 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -352,12 +352,48 @@ static ssize_t chp_shared_show(struct device *dev, static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); +static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL); + +static ssize_t chp_chid_external_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL); + static struct attribute *chp_attrs[] = { &dev_attr_status.attr, &dev_attr_configure.attr, &dev_attr_type.attr, &dev_attr_cmg.attr, &dev_attr_shared.attr, + &dev_attr_chid.attr, + &dev_attr_chid_external.attr, NULL, }; static struct attribute_group chp_attr_group = { diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 349d5fc47196..e7ef2a683b8f 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -43,7 +43,9 @@ struct channel_path_desc_fmt1 { u8 chpid; u32:24; u8 chpp; - u32 unused[3]; + u32 unused[2]; + u16 chid; + u32:16; u16 mdc; u16:13; u8 r:1; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index db95c547c09d..86af29f53bbe 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1353,6 +1353,8 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI select SCSI_FC_ATTRS + select GENERIC_CSUM + select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse Family of Fibre Channel PCI host adapters. 
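The RapidIO changes earlier in this diff (rio.h, rio.c, rio-sysfs.c and rio-scan.c) turn fabric enumeration and discovery into per-mport methods registered through rio_register_scan(). The sketch below is illustrative only and not part of the patch: it shows roughly how an alternative method could plug into that interface, modelled on rio_basic_attach() in rio-scan.c. The my_* names are hypothetical, and it assumes struct rio_scan and the rio_register_scan()/RIO_MPORT_ANY declarations from drivers/rapidio/rio.h are visible to the code.

/*
 * Illustrative only, not part of the patch: a skeleton enumeration/discovery
 * method built on the rio_register_scan() interface added above. The my_*
 * callbacks are placeholders; a real method would walk the fabric the way
 * rio-scan.c does.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rio.h>
#include "rio.h"	/* rio_register_scan(), RIO_MPORT_ANY (internal header) */

static int my_enum(struct rio_mport *mport, u32 flags)
{
	pr_info("RIO: enumerate mport %d (%s)\n", mport->id, mport->name);
	return 0;	/* placeholder: enumerate peers from this mport here */
}

static int my_disc(struct rio_mport *mport, u32 flags)
{
	pr_info("RIO: discover mport %d (%s)\n", mport->id, mport->name);
	return 0;	/* placeholder: wait for the enumerator, then discover */
}

static struct rio_scan my_scan_ops = {
	.enumerate	= my_enum,
	.discover	= my_disc,
};

static int __init my_scan_attach(void)
{
	/*
	 * Claim every mport that has no method attached yet; with
	 * RIO_MPORT_ANY the call returns -EBUSY only if no mport could
	 * be claimed at all.
	 */
	return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
}
late_initcall(my_scan_attach);

MODULE_DESCRIPTION("Example RapidIO enumeration method (illustrative)");
MODULE_LICENSE("GPL");

Once registered, such a method is driven either from rio_init_mports() at boot or through the write-only bus attribute added in rio-sysfs.c, i.e. a scan request written to /sys/bus/rapidio/scan with an mport ID (or -1 for all mports).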
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c index 64136c56e706..33072388ea16 100644 --- a/drivers/scsi/aic94xx/aic94xx_dev.c +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev) struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; - if (dev->dev_type == SATA_PM_PORT) + if (dev->dev_type == SAS_SATA_PM_PORT) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); else if (dev->tproto) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); @@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; u32 qdepth = 0; - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) { if (ata_id_has_ncq(ata_dev->id)) qdepth = ata_id_queue_depth(ata_dev->id); asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, @@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || - dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || + dev->dev_type == SAS_SATA_PM_PORT) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); @@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); if (dev->port->oob_mode != SATA_OOB_MODE) { flags |= OPEN_REQUIRED; - if ((dev->dev_type == SATA_DEV) || + if ((dev->dev_type == SAS_SATA_DEV) || (dev->tproto & SAS_PROTOCOL_STP)) { struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; if (rps_resp->frame_type == SMP_RESPONSE && @@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev) } else { flags |= CONCURRENT_CONN_SUPP; if (!dev->parent && - (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV)) + (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)) asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 4); else @@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); } } - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) flags |= SATA_MULTIPORT; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); @@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { i = asd_init_sata(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); @@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev) } } - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); if (rdev->I_T_nexus_loss_timeout > 0) asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, @@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev) spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { - case SATA_PM: + case SAS_SATA_PM: res = asd_init_sata_pm_ddb(dev); break; - case 
SATA_PM_PORT: + case SAS_SATA_PM_PORT: res = asd_init_sata_pm_port_ddb(dev); break; default: diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 81b736c76fff..4df867e07b20 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy) memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); - phy->identify_frame->dev_type = SAS_END_DEV; + phy->identify_frame->dev_type = SAS_END_DEVICE; if (phy->sas_phy.role & PHY_ROLE_INITIATOR) phy->identify_frame->initiator_bits = phy->sas_phy.iproto; if (phy->sas_phy.role & PHY_ROLE_TARGET) diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index cf9040933da6..d4c35df3d4ae 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev) struct sas_phy *phy = sas_get_local_phy(dev); /* Standard mandates link reset for ATA (type 0) and * hard reset for SSP (type 1) */ - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index f1733dfa3ae2..777e7c0bbb4b 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 5c87768c109c..e66aa7c11a8a 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint16_t status = 0, addl_status = 0, wrb_num = 0; struct be_mcc_wrb *temp_wrb; struct be_cmd_req_hdr *ioctl_hdr; + struct be_cmd_resp_hdr *ioctl_resp_hdr; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; if (beiscsi_error(phba)) @@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, ioctl_hdr->subsystem, ioctl_hdr->opcode, status, addl_status); + + if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { + ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; + if (ioctl_resp_hdr->response_length) + goto release_mcc_tag; + } rc = -EAGAIN; } @@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct be_cmd_req_hdr *hdr = embedded_payload(wrb); + struct be_cmd_resp_hdr *resp_hdr; be_dws_le_to_cpu(compl, 4); @@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, hdr->subsystem, hdr->opcode, compl_status, extd_status); + if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) { + resp_hdr = (struct be_cmd_resp_hdr *) hdr; + if (resp_hdr->response_length) + return 0; + } return -EBUSY; } return 0; @@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) void beiscsi_async_link_state_process(struct beiscsi_hba *phba, struct be_async_event_link_state *evt) { - switch (evt->port_link_status) { - case ASYNC_EVENT_LINK_DOWN: + if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) { + phba->state = BE_ADAPTER_LINK_DOWN; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link Down on Physical Port %d\n", + "BC_%d : Link Down on Port %d\n", evt->physical_port); - phba->state |= BE_ADAPTER_LINK_DOWN; iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); - break; - case ASYNC_EVENT_LINK_UP: + } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { phba->state = BE_ADAPTER_UP; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link UP on Physical Port %d\n", - evt->physical_port); - break; - default: - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Unexpected Async Notification %d on" - "Physical Port %d\n", - evt->port_link_status, + "BC_%d : Link UP on Port %d\n", evt->physical_port); } } @@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) { void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); - int wait = 0; + uint32_t wait = 0; u32 ready; do { @@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) struct be_mcc_compl *compl = &mbox->compl; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val &= ~MPU_MAILBOX_DB_RDY_MASK; val |= MPU_MAILBOX_DB_HI_MASK; val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba) struct be_mcc_compl *compl = &mbox->compl; struct be_ctrl_info *ctrl = &phba->ctrl; + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val |= MPU_MAILBOX_DB_HI_MASK; /* at bits 2 - 31 
place mbox dma addr msb bits 34 - 63 */ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, return status; } +/** + * be_cmd_fw_initialize()- Initialize FW + * @ctrl: Pointer to function control structure + * + * Send FW initialize pattern for the function. + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); @@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) return status; } +/** + * be_cmd_fw_uninit()- Uinitialize FW + * @ctrl: Pointer to function control structure + * + * Send FW uninitialize pattern for the function + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + int status; + u8 *endian_check; + + spin_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + endian_check = (u8 *) wrb; + *endian_check++ = 0xFF; + *endian_check++ = 0xAA; + *endian_check++ = 0xBB; + *endian_check++ = 0xFF; + *endian_check++ = 0xFF; + *endian_check++ = 0xCC; + *endian_check++ = 0xDD; + *endian_check = 0xFF; + + be_dws_cpu_to_le(wrb, sizeof(*wrb)); + + status = be_mbox_notify(ctrl); + if (status) + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BC_%d : be_cmd_fw_uninit Failed\n"); + + spin_unlock(&ctrl->mbox_lock); + return status; +} + int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *eq, bool sol_evts, bool no_delay, int coalesce_wm) @@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, OPCODE_COMMON_CQ_CREATE, sizeof(*req)); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); - if (chip_skh_r(ctrl->pdev)) { - req->hdr.version = MBX_CMD_VER2; - req->page_size = 1; - AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, - ctxt, coalesce_wm); - AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, - ctxt, no_delay); - AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, - __ilog2_u32(cq->len / 256)); - AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); - AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); @@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, func, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + } else { + req->hdr.version = MBX_CMD_VER2; + req->page_size = 1; + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, + ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, + __ilog2_u32(cq->len / 256)); + AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); + AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = 
wrb_from_mbox(&ctrl->mbox_mem); struct be_defq_create_req *req = embedded_payload(wrb); struct be_dma_mem *q_mem = &dq->dma_mem; + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); void *ctxt = &req->context; int status; @@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt, - 1); - AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt, - PCI_FUNC(ctrl->pdev->devfn)); - AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt, - be_encoded_q_len(length / sizeof(struct phys_addr))); - AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size, - ctxt, entry_size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt, - cq->id); + + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + cq_id_recv, ctxt, cq->id); + } else { + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + cq_id_recv, ctxt, cq->id); + } be_dws_cpu_to_le(ctxt, sizeof(req->context)); diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 23397d51ac54..99073086dfe0 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -52,6 +52,10 @@ struct be_mcc_wrb { /* Completion Status */ #define MCC_STATUS_SUCCESS 0x0 +#define MCC_STATUS_FAILED 0x1 +#define MCC_STATUS_ILLEGAL_REQUEST 0x2 +#define MCC_STATUS_ILLEGAL_FIELD 0x3 +#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4 #define CQE_STATUS_COMPL_MASK 0xFFFF #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ @@ -118,7 +122,8 @@ struct be_async_event_trailer { enum { ASYNC_EVENT_LINK_DOWN = 0x0, - ASYNC_EVENT_LINK_UP = 0x1 + ASYNC_EVENT_LINK_UP = 0x1, + ASYNC_EVENT_LOGICAL = 0x2 }; /** @@ -130,6 +135,9 @@ struct be_async_event_link_state { u8 port_link_status; u8 port_duplex; u8 port_speed; +#define BEISCSI_PHY_LINK_FAULT_NONE 0x00 +#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01 +#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02 u8 port_fault; u8 rsvd0[7]; struct be_async_event_trailer trailer; @@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); /*ISCSI Functuions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); @@ -751,6 +760,18 @@ struct amap_be_default_pdu_context { u8 rsvd4[32]; /* dword 3 */ } __packed; +struct amap_default_pdu_context_ext { + u8 rsvd0[16]; /* dword 0 */ + u8 ring_size[4]; /* dword 0 */ + u8 rsvd1[12]; /* dword 0 */ + u8 rsvd2[22]; /* dword 1 */ + u8 rx_pdid[9]; /* dword 1 */ + u8 rx_pdid_valid; /* dword 1 */ + u8 default_buffer_size[16]; /* dword 2 */ + u8 cq_id_recv[16]; /* dword 2 */ + u8 rsvd3[32]; /* dword 3 */ +} __packed; + struct be_defq_create_req { struct be_cmd_req_hdr hdr; u16 num_pages; @@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 { * stack to notify the * controller of a posted Work Request Block */ -#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */ +#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 16 */ #define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */ #define DB_DEF_PDU_WRB_INDEX_SHIFT 16 diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 9014690fe841..ef36be003f67 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, struct beiscsi_conn *beiscsi_conn, unsigned int cid) { - if (phba->conn_table[cid]) { + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Connection table already occupied. 
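The be_cmds.h hunk above only adds the raw MCC_STATUS_* completion codes; how a caller turns them into kernel error codes is not part of this section. A hedged sketch of one plausible mapping (the helper and the particular errno choices are assumptions, not the driver's behaviour):

    #include <linux/errno.h>

    /* Illustrative mapping from an MCC completion status to -errno. */
    static int be_mcc_status_to_errno(u16 status)
    {
            switch (status) {
            case MCC_STATUS_SUCCESS:
                    return 0;
            case MCC_STATUS_ILLEGAL_REQUEST:
            case MCC_STATUS_ILLEGAL_FIELD:
                    return -EINVAL;   /* malformed or unsupported command */
            case MCC_STATUS_INSUFFICIENT_BUFFER:
                    return -ENOMEM;   /* response buffer too small */
            case MCC_STATUS_FAILED:
            default:
                    return -EIO;      /* generic firmware failure */
            }
    }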
Detected clash\n"); @@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, } else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n", - cid, beiscsi_conn); + cri_index, beiscsi_conn); - phba->conn_table[cid] = beiscsi_conn; + phba->conn_table[cri_index] = beiscsi_conn; } return 0; } @@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) { struct beiscsi_hba *phba = beiscsi_ep->phba; + struct beiscsi_conn *beiscsi_conn; beiscsi_put_cid(phba, beiscsi_ep->ep_cid); beiscsi_ep->phba = NULL; + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = NULL; + + /** + * Check if any connection resource allocated by driver + * is to be freed.This case occurs when target redirection + * or connection retry is done. + **/ + if (!beiscsi_ep->conn) + return; + + beiscsi_conn = beiscsi_ep->conn; + if (beiscsi_conn->login_in_progress) { + beiscsi_free_mgmt_task_handles(beiscsi_conn, + beiscsi_conn->task); + beiscsi_conn->login_in_progress = 0; + } } /** @@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; struct beiscsi_hba *phba = beiscsi_ep->phba; - struct be_mcc_wrb *wrb; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; unsigned int tag; @@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", beiscsi_ep->ep_cid); - phba->ep_array[beiscsi_ep->ep_cid - - phba->fw_config.iscsi_cid_start] = ep; - if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + - phba->params.cxns_per_ctrl * 2)) { - - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Failed in allocate iscsi cid\n"); - goto free_ep; - } + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, @@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : Failed to allocate memory for" " mgmt_open_connection\n"); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); + beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); - if (!tag) { + if (tag <= 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Failed for cid=%d\n", beiscsi_ep->ep_cid); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); + beiscsi_free_ep(beiscsi_ep); return -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); - goto free_ep; + beiscsi_free_ep(beiscsi_ep); + return -EBUSY; } - ptcpcnct_out = embedded_payload(wrb); + ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; beiscsi_ep = ep->dd_data; beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; beiscsi_ep->cid_vld = 1; @@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, 
nonemb_cmd.dma); return 0; - -free_ep: - beiscsi_free_ep(beiscsi_ep); - return -EBUSY; } /** @@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, return ERR_PTR(ret); } + if (beiscsi_error(phba)) { + ret = -EIO; + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BS_%d : The FW state Not Stable!!!\n"); + return ERR_PTR(ret); + } + if (phba->state != BE_ADAPTER_UP) { ret = -EBUSY; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, @@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, unsigned int cid) { - if (phba->conn_table[cid]) - phba->conn_table[cid] = NULL; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) + phba->conn_table[cri_index] = NULL; else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : Connection table Not occupied.\n"); diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index 38eab7232159..31ddc8494398 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 4e2733d23003..d24a2867bc21 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00, DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); +DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); +DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); struct device_attribute *beiscsi_attrs[] = { &dev_attr_beiscsi_log_enable, &dev_attr_beiscsi_drvr_ver, &dev_attr_beiscsi_adapter_family, + &dev_attr_beiscsi_fw_ver, + &dev_attr_beiscsi_active_cid_count, NULL, }; @@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba) + BE2_TMFS + BE2_NOPOUT_REQ)); phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; + phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; @@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba, static unsigned int beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, - unsigned short cid, struct pdu_base *ppdu, unsigned long pdu_len, void *pbuffer, unsigned long buf_len) @@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[cid]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (pwrb_context->wrb_handles_available >= 2) { pwrb_handle = pwrb_context->pwrb_handle_base[ pwrb_context->alloc_index]; @@ 
-1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, hdr->t2retain = 0; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->dlength[0] = 0; hdr->dlength[1] = 0; @@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); @@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; struct beiscsi_io_task *io_task; - struct iscsi_conn *conn = beiscsi_conn->conn; - struct iscsi_session *session = conn->session; - uint16_t wrb_index, cid; + uint16_t wrb_index, cid, cri_index; phwi_ctrlr = phba->phwi_ctrlr; - if (chip_skh_r(phba->pcidev)) { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + if (is_chip_be2_be3r(phba)) { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, cid, psol); } else { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, cid, psol); } - pwrb_context = &phwi_ctrlr->wrb_context[ - cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; task = pwrb_handle->pio_handle; io_task = task->dd_data; - spin_lock_bh(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, io_task->psgl_handle); - spin_unlock_bh(&phba->mgmt_sgl_lock); - spin_lock_bh(&session->lock); - free_wrb_handle(phba, pwrb_context, pwrb_handle); - spin_unlock_bh(&session->lock); + memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); + iscsi_put_task(task); } static void @@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, hdr = (struct iscsi_nopin *)task->hdr; hdr->flags = csol_cqe->i_flags; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); - hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->opcode = ISCSI_OP_NOOP_IN; hdr->itt = io_task->libiscsi_itt; @@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, struct sol_cqe *psol, struct common_sol_cqe *csol_cqe) { - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, + i_res_cnt, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + i_cmd_wnd, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, + cid, psol); + csol_cqe->hw_sts = 
AMAP_GET_BITS(struct amap_sol_cqe, + hw_sts, psol); + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, + i_resp, psol); + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, + i_sts, psol); + csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, + i_flags, psol); + } else { csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_exp_cmd_sn, psol); csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, @@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, cid, psol); csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, hw_sts, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_cmd_wnd, psol); if (AMAP_GET_BITS(struct amap_sol_cqe_v2, cmd_cmpl, psol)) @@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, if (AMAP_GET_BITS(struct amap_sol_cqe_v2, o, psol)) csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; - } else { - csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, - i_exp_cmd_sn, psol); - csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, - i_res_cnt, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, - i_cmd_wnd, psol); - csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, - wrb_index, psol); - csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, - cid, psol); - csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, - hw_sts, psol); - csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, - i_resp, psol); - csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, - i_sts, psol); - csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, - i_flags, psol); } } @@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; struct common_sol_cqe csol_cqe = {0}; + uint16_t cri_index = 0; phwi_ctrlr = phba->phwi_ctrlr; /* Copy the elements to a common structure */ adapter_get_sol_cqe(phba, psol, &csol_cqe); - pwrb_context = &phwi_ctrlr->wrb_context[ - csol_cqe.cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[ csol_cqe.wrb_index]; @@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba, unsigned char is_header = 0; unsigned int index, dpl; - if (chip_skh_r(phba->pcidev)) { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + if (is_chip_be2_be3r(phba)) { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, index, pdpdu_cqe); } else { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, index, pdpdu_cqe); } @@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba, WARN_ON(!pasync_handle); - pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start; + pasync_handle->cri = + BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); pasync_handle->is_header = is_header; pasync_handle->buffer_len = dpl; *pcq_index = index; @@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, } status = beiscsi_process_async_pdu(beiscsi_conn, phba, - (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start), phdr, hdr_len, 
pfirst_buffer, offset); @@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) unsigned int num_processed = 0; unsigned int tot_nump = 0; unsigned short code = 0, cid = 0; + uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; @@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 32] & CQE_CODE_MASK); /* Get the CID */ - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } else { if ((code == DRIVERMSG_NOTIFY) || (code == UNSOL_HDR_NOTIFY) || (code == UNSOL_DATA_NOTIFY)) @@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) else cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, cid, sol); - } else - cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } - ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + ep = phba->ep_array[cri_index]; beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work) static int be_iopoll(struct blk_iopoll *iop, int budget) { - static unsigned int ret; + unsigned int ret; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; @@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) /* Check for the data_count */ dsp_value = (task->data_count) ? 1 : 0; - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, dsp_value); else - AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, dsp_value); /* Map addr only if there is data_count */ @@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) static int beiscsi_alloc_mem(struct beiscsi_hba *phba) { - struct be_mem_descriptor *mem_descr; dma_addr_t bus_add; + struct hwi_controller *phwi_ctrlr; + struct be_mem_descriptor *mem_descr; struct mem_array *mem_arr, *mem_arr_orig; unsigned int i, j, alloc_size, curr_alloc_size; @@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) if (!phba->phwi_ctrlr) return -ENOMEM; + /* Allocate memory for wrb_context */ + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctrlr->wrb_context) + return -ENOMEM; + phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); if (!phba->init_mem) { + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) GFP_KERNEL); if (!mem_arr_orig) { kfree(phba->init_mem); + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2628,6 +2640,7 @@ free_mem: } kfree(mem_arr_orig); kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba) static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; + struct hwi_context_memory *phwi_ctxt; struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; struct hwi_wrb_context *pwrb_context; @@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) mem_descr_wrb += 
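beiscsi_alloc_mem() in the hunk above replaces the fixed-size wrb_context array with one allocated per controller (sized by cxns_per_ctrl), so every later failure branch now has to free it as well, which is why kfree(phwi_ctrlr->wrb_context) shows up on each error path. A compact sketch of the same allocate-then-unwind shape using goto labels; this illustrates the pattern and is not the code the driver actually uses:

    #include <linux/slab.h>

    static int beiscsi_alloc_mem_sketch(struct beiscsi_hba *phba)
    {
            struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;

            phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl,
                                              sizeof(struct hwi_wrb_context),
                                              GFP_KERNEL);
            if (!phwi_ctrlr->wrb_context)
                    return -ENOMEM;

            phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(struct be_mem_descriptor),
                                     GFP_KERNEL);
            if (!phba->init_mem)
                    goto free_wrb_context;

            /* ... the per-descriptor allocations would follow here ... */
            return 0;

    free_wrb_context:
            kfree(phwi_ctrlr->wrb_context);
            phwi_ctrlr->wrb_context = NULL;
            return -ENOMEM;
    }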
HWI_MEM_WRB; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + /* Allocate memory for WRBQ */ + phwi_ctxt = phwi_ctrlr->phwi_ctxt; + phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!phwi_ctxt->be_wrbq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRBQ Mem Alloc Failed\n"); + return -ENOMEM; + } + + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; pwrb_context->pwrb_handle_base = kzalloc(sizeof(struct wrb_handle *) * @@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) } } idx = 0; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (!num_cxn_wrb) { pwrb = mem_descr_wrb->mem_array[idx].virtual_address; @@ -2752,7 +2777,7 @@ init_wrb_hndl_failed: return -ENOMEM; } -static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) +static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; struct hba_parameters *p = &phba->params; @@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!pasync_ctx->async_entry) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n"); + return -ENOMEM; + } + pasync_ctx->num_entries = p->asyncpdus_per_ctrl; pasync_ctx->buffer_size = p->defpdu_hdr_sz; @@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx->async_header.ep_read_ptr = -1; pasync_ctx->async_data.host_write_ptr = 0; pasync_ctx->async_data.ep_read_ptr = -1; + + return 0; } static int @@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; + struct hwi_wrb_context *pwrb_context; int status; idx = 0; @@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, kfree(pwrb_arr); return status; } - phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 
- id; + pwrb_context = &phwi_ctrlr->wrb_context[i]; + pwrb_context->cid = phwi_context->be_wrbq[i].id; + BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); return 0; @@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba) struct hwi_wrb_context *pwrb_context; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); @@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; + struct hwi_async_pdu_context *pasync_ctx; int i, eq_num; phwi_ctrlr = phba->phwi_ctrlr; @@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } + kfree(phwi_context->be_wrbq); free_wrb_handles(phba); q = &phwi_context->be_def_hdrq; @@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } be_mcc_queues_destroy(phba); + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; + kfree(pasync_ctx->async_entry); + be_cmd_fw_uninit(ctrl); } static int be_mcc_queues_create(struct beiscsi_hba *phba, @@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba) if (beiscsi_init_wrb_handle(phba)) return -ENOMEM; - hwi_init_async_pdu_ctx(phba); + if (hwi_init_async_pdu_ctx(phba)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx failed\n"); + return -ENOMEM; + } + if (hwi_init_port(phba) != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_controller failed\n"); @@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) mem_descr++; } kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); } @@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { - int i, new_cid; + int i; phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, GFP_KERNEL); @@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) return -ENOMEM; } phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * - phba->params.cxns_per_ctrl * 2, GFP_KERNEL); + phba->params.cxns_per_ctrl, GFP_KERNEL); if (!phba->ep_array) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory in " "hba_setup_cid_tbls\n"); kfree(phba->cid_array); + phba->cid_array = NULL; return -ENOMEM; } - new_cid = phba->fw_config.iscsi_cid_start; - for (i = 0; i < phba->params.cxns_per_ctrl; i++) { - phba->cid_array[i] = new_cid; - new_cid += 2; + + phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * + phba->params.cxns_per_ctrl, GFP_KERNEL); + if (!phba->conn_table) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory in" + "hba_setup_cid_tbls\n"); + + kfree(phba->cid_array); + kfree(phba->ep_array); + phba->cid_array = NULL; + phba->ep_array = NULL; + return -ENOMEM; } + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) + phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; + phba->avlbl_cids = phba->params.cxns_per_ctrl; return 0; } @@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba) kfree(phba->eh_sgl_hndl_base); 
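Most of this patch replaces `cid - phba->fw_config.iscsi_cid_start` arithmetic with a lookup table: beiscsi_create_wrb_rings() records each ring's CID with BE_SET_CID_TO_CRI(), and every consumer recovers the array index through BE_GET_CRI_FROM_CID(). A stripped-down, self-contained model of that mapping, assuming only that CRIs run 0..nr_rings-1 in creation order:

    #include <stdint.h>

    #define BE_MAX_SESSION 2048

    /* cid_to_cri_map[cid] holds the controller-relative index (CRI) for a
     * connection ID; it is filled once, when the WRB rings are created.
     */
    static uint16_t cid_to_cri_map[BE_MAX_SESSION];

    static void be_set_cid_to_cri(uint16_t cri_index, uint16_t cid)
    {
            cid_to_cri_map[cid] = cri_index;
    }

    static uint16_t be_get_cri_from_cid(uint16_t cid)
    {
            return cid_to_cri_map[cid];
    }

    /* CIDs handed out by firmware need not be contiguous, so index i (the
     * CRI) is remembered against whatever CID ring i happened to get.
     */
    static void record_ring_cids(const uint16_t *ring_cid, unsigned int nr_rings)
    {
            unsigned int i;

            for (i = 0; i < nr_rings; i++)
                    be_set_cid_to_cri(i, ring_cid[i]);
    }

This mapping is what lets conn_table, ep_array and wrb_context shrink to cxns_per_ctrl entries instead of being sized for the whole CID space.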
kfree(phba->cid_array); kfree(phba->ep_array); + kfree(phba->conn_table); +} + +/** + * beiscsi_free_mgmt_task_handles()- Free driver CXN resources + * @beiscsi_conn: ptr to the conn to be cleaned up + * @task: ptr to iscsi_task resource to be freed. + * + * Free driver mgmt resources binded to CXN. + **/ +void +beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + io_task = task->dd_data; + + if (io_task->pwrb_handle) { + memset(io_task->pwrb_handle->pwrb, 0, + sizeof(struct iscsi_wrb)); + free_wrb_handle(phba, pwrb_context, + io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + } + + if (io_task->psgl_handle) { + spin_lock_bh(&phba->mgmt_sgl_lock); + free_mgmt_sgl_handle(phba, + io_task->psgl_handle); + io_task->psgl_handle = NULL; + spin_unlock_bh(&phba->mgmt_sgl_lock); + } + + if (io_task->mtask_addr) + pci_unmap_single(phba->pcidev, + io_task->mtask_addr, + io_task->mtask_data_count, + PCI_DMA_TODEVICE); } /** @@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->cmd_bhs) { pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, @@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) io_task->psgl_handle = NULL; } } else { - if (!beiscsi_conn->login_in_progress) { - if (io_task->pwrb_handle) { - free_wrb_handle(phba, pwrb_context, - io_task->pwrb_handle); - io_task->pwrb_handle = NULL; - } - if (io_task->psgl_handle) { - spin_lock(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, - io_task->psgl_handle); - spin_unlock(&phba->mgmt_sgl_lock); - io_task->psgl_handle = NULL; - } - if (io_task->mtask_addr) { - pci_unmap_single(phba->pcidev, - io_task->mtask_addr, - io_task->mtask_data_count, - PCI_DMA_TODEVICE); - io_task->mtask_addr = 0; - } - } + if (!beiscsi_conn->login_in_progress) + beiscsi_free_mgmt_task_handles(beiscsi_conn, task); } } @@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, beiscsi_cleanup_task(task); spin_unlock_bh(&session->lock); - pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start)); + pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); /* Check for the adapter family */ - if (chip_skh_r(phba->pcidev)) - beiscsi_offload_cxn_v2(params, pwrb_handle); - else + if (is_chip_be2_be3r(phba)) beiscsi_offload_cxn_v0(params, pwrb_handle, phba->init_mem); + else + beiscsi_offload_cxn_v2(params, pwrb_handle); be_dws_le_to_cpu(pwrb_handle->pwrb, sizeof(struct iscsi_target_context_update_wrb)); @@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; itt_t itt; + uint16_t cri_index = 0; struct beiscsi_session *beiscsi_sess = 
beiscsi_conn->beiscsi_sess; dma_addr_t paddr; @@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) goto free_hndls; } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } else { io_task->scsi_cmnd = NULL; if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { + beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = (struct sgl_handle *) @@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->psgl_handle; io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | @@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->pwrb_handle = beiscsi_conn->plogin_wrb_handle; } - beiscsi_conn->task = task; } else { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); @@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4324,12 +4414,13 @@ free_io_hndls: free_mgmt_hndls: spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; spin_unlock(&phba->mgmt_sgl_lock); free_hndls: phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[ - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; @@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int doorbell = 0; pwrb = io_task->pwrb_handle->pwrb; - memset(pwrb, 0, sizeof(*pwrb)); io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; io_task->bhs_len = sizeof(struct be_cmd_bhs); @@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task) pwrb = io_task->pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); - if (chip_skh_r(phba->pcidev)) { - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, - be32_to_cpu(task->cmdsn)); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, - io_task->pwrb_handle->wrb_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, - io_task->psgl_handle->sgl_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, - task->data_count); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, - io_task->pwrb_handle->nxt_wrb_index); - pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, @@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task) AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 
io_task->pwrb_handle->nxt_wrb_index); pwrb_typeoffset = BE_WRB_TYPE_OFFSET; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; } @@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task) case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 1); } else { ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 0); } hwi_write_buffer(pwrb, task); @@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task) } /* Set the task type */ - io_task->wrb_type = (chip_skh_r(phba->pcidev)) ? - AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) : - AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); + io_task->wrb_type = (is_chip_be2_be3r(phba)) ? + AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : + AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & @@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, case OC_SKH_ID1: phba->generation = BE_GEN4; phba->iotask_fn = beiscsi_iotask_v2; + break; default: phba->generation = 0; } diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 5946577d79d6..2c06ef3c02ac 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
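A small but consequential fix in the beiscsi_dev_probe() hunk above is the added break after case OC_SKH_ID1: — without it, control fell through into default: and immediately reset phba->generation to 0 after setting BE_GEN4. A self-contained illustration of the fall-through hazard; the device id and generation values below are placeholders, not the driver's:

    #include <stdio.h>

    enum gen { GEN_UNKNOWN = 0, GEN4 = 4 };

    static enum gen classify(unsigned int device_id)
    {
            enum gen g;

            switch (device_id) {
            case 0x1234:            /* placeholder device id */
                    g = GEN4;
                    break;          /* without this break we fall into default */
            default:
                    g = GEN_UNKNOWN;
            }
            return g;
    }

    int main(void)
    {
            printf("generation %d\n", classify(0x1234));    /* prints 4 */
            return 0;
    }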
* * This program is free software; you can redistribute it and/or @@ -36,7 +36,7 @@ #include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.0.272.0" +#define BUILD_STR "10.0.467.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -66,8 +66,9 @@ #define MAX_CPUS 64 #define BEISCSI_MAX_NUM_CPUS 7 -#define OC_SKH_MAX_NUM_CPUS 63 +#define OC_SKH_MAX_NUM_CPUS 31 +#define BEISCSI_VER_STRLEN 32 #define BEISCSI_SGLIST_ELEMENTS 30 @@ -265,7 +266,9 @@ struct invalidate_command_table { unsigned short cid; } __packed; -#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1) +#define chip_be2(phba) (phba->generation == BE_GEN2) +#define chip_be3_r(phba) (phba->generation == BE_GEN3) +#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) struct beiscsi_hba { struct hba_parameters params; struct hwi_controller *phwi_ctrlr; @@ -304,10 +307,15 @@ struct beiscsi_hba { unsigned short avlbl_cids; unsigned short cid_alloc; unsigned short cid_free; - struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2]; struct list_head hba_queue; +#define BE_MAX_SESSION 2048 +#define BE_SET_CID_TO_CRI(cri_index, cid) \ + (phba->cid_to_cri_map[cid] = cri_index) +#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) + unsigned short cid_to_cri_map[BE_MAX_SESSION]; unsigned short *cid_array; struct iscsi_endpoint **ep_array; + struct beiscsi_conn **conn_table; struct iscsi_boot_kset *boot_kset; struct Scsi_Host *shost; struct iscsi_iface *ipv4_iface; @@ -339,6 +347,7 @@ struct beiscsi_hba { struct delayed_work beiscsi_hw_check_task; u8 mac_address[ETH_ALEN]; + char fw_ver_str[BEISCSI_VER_STRLEN]; char wq_name[20]; struct workqueue_struct *wq; /* The actuak work queue */ struct be_ctrl_info ctrl; @@ -563,7 +572,7 @@ struct hwi_async_pdu_context { * This is a varying size list! Do not add anything * after this entry!! */ - struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; + struct hwi_async_entry *async_entry; }; #define PDUCQE_CODE_MASK 0x0000003F @@ -749,6 +758,8 @@ void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); void beiscsi_process_all_cqs(struct work_struct *work); +void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task); static inline bool beiscsi_error(struct beiscsi_hba *phba) { @@ -933,7 +944,7 @@ struct hwi_controller { struct sgl_handle *psgl_handle_base; unsigned int wrb_mem_index; - struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2]; + struct hwi_wrb_context *wrb_context; struct mcc_wrb *pmcc_wrb_base; struct be_ring default_pdu_hdr; struct be_ring default_pdu_data; @@ -970,9 +981,7 @@ struct hwi_context_memory { struct be_queue_info be_def_hdrq; struct be_queue_info be_def_dataq; - struct be_queue_info be_wrbq[BE2_MAX_SESSIONS]; - struct be_mcc_wrb_context *pbe_mcc_context; - + struct be_queue_info *be_wrbq; struct hwi_async_pdu_context *pasync_ctx; }; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 55cc9902263d..245a9595a93a 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
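The be_main.h hunk above drops the PCI-id test chip_skh_r(pdev) in favour of generation-based predicates, so the branches earlier in the patch read as "is this a BE2/BE3-R part?" and the v2 AMAP layouts become the code path for newer silicon. A static-inline equivalent of the new macros, assuming only that phba->generation is set to BE_GEN2/BE_GEN3/BE_GEN4 at probe time as the probe hunk shows:

    #include <linux/types.h>

    /* Illustrative inline form of chip_be2()/chip_be3_r()/is_chip_be2_be3r(). */
    static inline bool beiscsi_is_be2_be3r(const struct beiscsi_hba *phba)
    {
            return phba->generation == BE_GEN2 || phba->generation == BE_GEN3;
    }

Inverting the test also means a device whose id is not recognized (generation left at 0) falls onto the newer v2 paths rather than the legacy ones.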
* * This program is free software; you can redistribute it and/or @@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->fw_config.iscsi_features = %d\n", phba->fw_config.iscsi_features); + memcpy(phba->fw_ver_str, resp->params.hba_attribs. + firmware_version_string, BEISCSI_VER_STRLEN); } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_check_supported_fw\n"); @@ -1260,6 +1262,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, } /** + * beiscsi_fw_ver_disp()- Display Firmware Version + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Firmware version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); +} + +/** + * beiscsi_active_cid_disp()- Display Sessions Active + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", + (phba->params.cxns_per_ctrl - phba->avlbl_cids)); +} + +/** * beiscsi_adap_family_disp()- Display adapter family. * @dev: ptr to device to get priv structure * @attr: device attribute, not used. diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 2e4968add799..04af7e74fe48 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
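mgmt_check_supported_fw() above copies the 32-byte firmware_version_string field straight into phba->fw_ver_str, which beiscsi_fw_ver_disp() then formats with "%s". If the firmware ever filled all 32 bytes without a terminating NUL, that snprintf would read past the buffer; whether that can happen is an assumption about firmware behaviour, but a defensive copy costs nothing. A sketch of such a variant (the helper name is illustrative):

    #include <linux/string.h>

    /* Defensive variant: fw_ver_str is always NUL-terminated regardless of
     * what the firmware put in the fixed-size field.
     */
    static void beiscsi_copy_fw_ver(struct beiscsi_hba *phba, const u8 *fw_str)
    {
            memcpy(phba->fw_ver_str, fw_str, BEISCSI_VER_STRLEN - 1);
            phba->fw_ver_str[BEISCSI_VER_STRLEN - 1] = '\0';
    }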
* * This program is free software; you can redistribute it and/or @@ -156,25 +156,25 @@ union invalidate_commands_params { } __packed; struct mgmt_hba_attributes { - u8 flashrom_version_string[32]; - u8 manufacturer_name[32]; + u8 flashrom_version_string[BEISCSI_VER_STRLEN]; + u8 manufacturer_name[BEISCSI_VER_STRLEN]; u32 supported_modes; u8 seeprom_version_lo; u8 seeprom_version_hi; u8 rsvd0[2]; u32 fw_cmd_data_struct_version; u32 ep_fw_data_struct_version; - u32 future_reserved[12]; + u8 ncsi_version_string[12]; u32 default_extended_timeout; - u8 controller_model_number[32]; + u8 controller_model_number[BEISCSI_VER_STRLEN]; u8 controller_description[64]; - u8 controller_serial_number[32]; - u8 ip_version_string[32]; - u8 firmware_version_string[32]; - u8 bios_version_string[32]; - u8 redboot_version_string[32]; - u8 driver_version_string[32]; - u8 fw_on_flash_version_string[32]; + u8 controller_serial_number[BEISCSI_VER_STRLEN]; + u8 ip_version_string[BEISCSI_VER_STRLEN]; + u8 firmware_version_string[BEISCSI_VER_STRLEN]; + u8 bios_version_string[BEISCSI_VER_STRLEN]; + u8 redboot_version_string[BEISCSI_VER_STRLEN]; + u8 driver_version_string[BEISCSI_VER_STRLEN]; + u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN]; u32 functionalities_supported; u16 max_cdblength; u8 asic_revision; @@ -190,7 +190,8 @@ struct mgmt_hba_attributes { u32 firmware_post_status; u32 hba_mtu[8]; u8 iscsi_features; - u8 future_u8[3]; + u8 asic_generation; + u8 future_u8[2]; u32 future_u32[3]; } __packed; @@ -207,7 +208,7 @@ struct mgmt_controller_attributes { u64 unique_identifier; u8 netfilters; u8 rsvd0[3]; - u8 future_u32[4]; + u32 future_u32[4]; } __packed; struct be_mgmt_controller_attributes { @@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); ssize_t beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t beiscsi_fw_ver_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_active_cid_disp(struct device *dev, + struct device_attribute *attr, char *buf); + ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 11596b2c4702..08b22a901c25 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -2,7 +2,7 @@ #define _BNX2FC_H_ /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -64,10 +64,12 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.13" +#define BNX2FC_VERSION "1.0.14" #define PFX "bnx2fc: " +#define BCM_CHIP_LEN 16 + #define BNX2X_DOORBELL_PCI_BAR 2 #define BNX2FC_MAX_BD_LEN 0xffff @@ -241,6 +243,8 @@ struct bnx2fc_hba { int wait_for_link_down; int num_ofld_sess; struct list_head vports; + + char chip_num[BCM_CHIP_LEN]; }; struct bnx2fc_interface { diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index bdbbb13b8534..b1c9a4f8caee 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -3,7 +3,7 @@ * This file contains helper routines that handle ELS requests * and responses. 
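The mgmt_hba_attributes / mgmt_controller_attributes hunks above rename and retype reserved fields (future_reserved becomes ncsi_version_string, u8 future_u32[4] becomes u32 future_u32[4]), and these structures must stay byte-for-byte compatible with what the firmware returns. A compile-time size check is one way to catch an accidental layout change; the expected size below is a placeholder, not a value taken from the firmware specification:

    #include <linux/bug.h>

    /* Placeholder: substitute the size documented for the firmware response
     * before relying on this check.
     */
    #define MGMT_HBA_ATTRIBS_EXPECTED_SIZE 480

    static inline void mgmt_hba_attribs_layout_check(void)
    {
            BUILD_BUG_ON(sizeof(struct mgmt_hba_attributes) !=
                         MGMT_HBA_ATTRIBS_EXPECTED_SIZE);
    }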
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7dffec1e5715..69ac55495c1d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -3,7 +3,7 @@ * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Dec 21, 2012" +#define DRV_MODULE_RELDATE "Mar 08, 2013" static char version[] = @@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct Scsi_Host *shost = lport->host; int rc = 0; @@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) } if (!lport->vport) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; - sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", - BNX2FC_NAME, BNX2FC_VERSION, + snprintf(fc_host_symbolic_name(lport->host), 256, + "%s (Broadcom %s) v%s over %s", + BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, interface->netdev->name); return 0; @@ -1656,23 +1658,60 @@ mem_err: static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) { struct cnic_dev *cnic; + struct pci_dev *pdev; if (!hba->cnic) { printk(KERN_ERR PFX "cnic is NULL\n"); return -ENODEV; } cnic = hba->cnic; - hba->pcidev = cnic->pcidev; - if (hba->pcidev) - pci_dev_get(hba->pcidev); + pdev = hba->pcidev = cnic->pcidev; + if (!hba->pcidev) + return -ENODEV; + switch (pdev->device) { + case PCI_DEVICE_ID_NX2_57710: + strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57711: + strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57712: + case PCI_DEVICE_ID_NX2_57712_MF: + case PCI_DEVICE_ID_NX2_57712_VF: + strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57800: + case PCI_DEVICE_ID_NX2_57800_MF: + case PCI_DEVICE_ID_NX2_57800_VF: + strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57810: + case PCI_DEVICE_ID_NX2_57810_MF: + case PCI_DEVICE_ID_NX2_57810_VF: + strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57840: + case PCI_DEVICE_ID_NX2_57840_MF: + case PCI_DEVICE_ID_NX2_57840_VF: + case PCI_DEVICE_ID_NX2_57840_2_20: + case PCI_DEVICE_ID_NX2_57840_4_10: + strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); + break; + default: + pr_err(PFX "Unknown device id 0x%x\n", pdev->device); + break; + } + pci_dev_get(hba->pcidev); return 0; } static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) { - if (hba->pcidev) + if (hba->pcidev) { + hba->chip_num[0] = '\0'; pci_dev_put(hba->pcidev); + } hba->pcidev = NULL; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 50510ffe1bf5..c0d035a8f8f9 100644 --- 
a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -2,7 +2,7 @@ * This file contains the code that low level functions that interact * with 57712 FCoE firmware. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; - fcoe_init3.perf_config = 1; + /* + * enable both cached connection and cached tasks + * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both + */ + fcoe_init3.perf_config = 3; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 723a9a8ba5ee..575142e92d9c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1,7 +1,7 @@ /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. * IO manager and SCSI IO processing. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_comp = 0; - if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, - &io_req->req_flags))) { + if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); + rc = SUCCESS; + } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { /* Let the scsi-ml try to recover this command */ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", io_req->xid); diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index c57a3bb8a9fb..4d93177dfb53 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -2,7 +2,7 @@ * Handles operations such as session offload/upload etc, and manages * session resources such as connection id and qp resources. 
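bnx2fc_eh_abort() in the hunk above now tests BNX2FC_FLAG_IO_COMPL before trying to claim BNX2FC_FLAG_ABTS_DONE, so an I/O that completed in another context while the abort handler slept is reported as SUCCESS instead of being treated as a failed abort. The decision reduced to its essentials (the helper name is illustrative; locking and the surrounding cleanup are omitted):

    #include <linux/bitops.h>
    #include <scsi/scsi.h>          /* SUCCESS / FAILED */

    static int decide_abort_outcome(struct bnx2fc_cmd *io_req)
    {
            if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags))
                    return SUCCESS;         /* completion beat the abort */

            if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags))
                    return FAILED;          /* ABTS never finished; let SCSI-ML escalate */

            return SUCCESS;                 /* ABTS already completed elsewhere */
    }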
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h index 0f9c04175b11..372a67d122d3 100644 --- a/drivers/scsi/csiostor/csio_lnode.h +++ b/drivers/scsi/csiostor/csio_lnode.h @@ -114,7 +114,7 @@ struct csio_lnode_stats { uint32_t n_rnode_match; /* matched rnode */ uint32_t n_dev_loss_tmo; /* Device loss timeout */ uint32_t n_fdmi_err; /* fdmi err */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ uint32_t n_rnode_alloc; /* rnode allocated */ uint32_t n_rnode_free; /* rnode freed */ diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h index 65940096a80d..433434221222 100644 --- a/drivers/scsi/csiostor/csio_rnode.h +++ b/drivers/scsi/csiostor/csio_rnode.h @@ -63,7 +63,7 @@ struct csio_rnode_stats { uint32_t n_err_nomem; /* error nomem */ uint32_t n_evt_unexp; /* unexpected event */ uint32_t n_evt_drop; /* unexpected event */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ uint32_t n_lun_rst; /* Number of resets of * of LUNs under this diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 98436c363035..b6d1f92ed33c 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -38,7 +38,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.2" +#define DRV_VERSION "1.5.0.22" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -192,6 +192,18 @@ enum fnic_state { struct mempool; +enum fnic_evt { + FNIC_EVT_START_VLAN_DISC = 1, + FNIC_EVT_START_FCF_DISC = 2, + FNIC_EVT_MAX, +}; + +struct fnic_event { + struct list_head list; + struct fnic *fnic; + enum fnic_evt event; +}; + /* Per-instance private data structure */ struct fnic { struct fc_lport *lport; @@ -254,6 +266,18 @@ struct fnic { struct sk_buff_head frame_queue; struct sk_buff_head tx_queue; + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); + struct work_struct fip_frame_work; + struct sk_buff_head fip_frame_queue; + struct timer_list fip_timer; + struct list_head vlans; + spinlock_t vlans_lock; + + struct work_struct event_work; + struct list_head evlist; + /*** FIP related data members -- end ***/ + /* copy work queue cache line section */ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; /* completion queue cache line section */ @@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) } extern struct workqueue_struct *fnic_event_queue; +extern struct workqueue_struct *fnic_fip_queue; extern struct device_attribute *fnic_attrs[]; void fnic_clear_intr_mode(struct fnic *fnic); @@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *); void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); void fnic_handle_frame(struct work_struct *work); void fnic_handle_link(struct work_struct *work); +void fnic_handle_event(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct 
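The two csiostor hunks above are the same one-line fix: n_evt_fw[] is indexed by firmware event values up to and including PROTO_ERR_IMPL_LOGO, so the array needs PROTO_ERR_IMPL_LOGO + 1 entries. A self-contained illustration of the off-by-one (the enumerators here are stand-ins, not the csiostor protocol events):

    #include <stdint.h>

    enum proto_evt {
            PROTO_EVT_LINK_UP,
            PROTO_EVT_LINK_DOWN,
            PROTO_ERR_IMPL_LOGO,    /* highest value that is used as an index */
    };

    struct evt_stats {
            /* "+ 1" so the last enumerator is a valid index */
            uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];
    };

    static void count_event(struct evt_stats *s, enum proto_evt e)
    {
            s->n_evt_fw[e]++;       /* in-bounds even for PROTO_ERR_IMPL_LOGO */
    }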
vnic_rq *rq, struct vnic_rq_buf *buf); @@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic); int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); +void fnic_handle_fip_frame(struct work_struct *work); +void fnic_handle_fip_event(struct fnic *fnic); +void fnic_fcoe_reset_vlans(struct fnic *fnic); +void fnic_fcoe_evlist_free(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct fnic *fnic); + static inline int fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) { diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 483eb9dbe663..006fa92a02df 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -31,12 +31,20 @@ #include <scsi/libfc.h> #include "fnic_io.h" #include "fnic.h" +#include "fnic_fip.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +static u8 fcoe_all_fcfs[ETH_ALEN]; +struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; static void fnic_set_eth_mode(struct fnic *); +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); void fnic_handle_link(struct work_struct *work) { @@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work) FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); fcoe_ctlr_link_down(&fnic->ctlr); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); @@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work) } else if (fnic->link_status) { /* DOWN -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); } else { @@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work) } } +void fnic_fcoe_evlist_free(struct fnic *fnic) +{ + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + list_del(&fevt->list); + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fnic_handle_event(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, event_work); + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + if (fnic->stop_rx_link_events) { + list_del(&fevt->list); + kfree(fevt); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. 
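fnic_handle_event() above is only the consumer side of the new evlist; the producer that allocates a struct fnic_event, links it onto fnic->evlist and kicks event_work is not shown in this section. A hypothetical enqueue helper consistent with the fields declared in the fnic.h hunk (the function name, its callers and the use of schedule_work() are assumptions — the driver drives its own workqueues):

    #include <linux/slab.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    static int fnic_queue_event(struct fnic *fnic, enum fnic_evt event)
    {
            struct fnic_event *fevt;
            unsigned long flags;

            fevt = kzalloc(sizeof(*fevt), GFP_ATOMIC);
            if (!fevt)
                    return -ENOMEM;

            fevt->fnic = fnic;
            fevt->event = event;

            spin_lock_irqsave(&fnic->fnic_lock, flags);
            list_add_tail(&fevt->list, &fnic->evlist);
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);

            schedule_work(&fnic->event_work);
            return 0;
    }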
+ */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_del(&fevt->list); + switch (fevt->event) { + case FNIC_EVT_START_VLAN_DISC: + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + spin_lock_irqsave(&fnic->fnic_lock, flags); + break; + case FNIC_EVT_START_FCF_DISC: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start FCF Discovery\n"); + fnic_fcoe_start_fcf_disc(fnic); + break; + default: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unknown event 0x%x\n", fevt->event); + break; + } + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/** + * Check if the Received FIP FLOGI frame is rejected + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is rejected with unsupported cmd with + * insufficient resource els explanation. + */ +static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, + struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + enum fip_desc_type els_dtype = 0; + u16 op; + u8 els_op; + u8 sub; + + size_t els_len = 0; + size_t rlen; + size_t dlen = 0; + + if (skb_linearize(skb)) + return 0; + + if (skb->len < sizeof(*fiph)) + return 0; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op != FIP_OP_LS) + return 0; + + if (sub != FIP_SC_REP) + return 0; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return 0; + + desc = (struct fip_desc *)(fiph + 1); + dlen = desc->fip_dlen * FIP_BPW; + + if (desc->fip_dtype == FIP_DT_FLOGI) { + + shost_printk(KERN_DEBUG, lport->host, + " FIP TYPE FLOGI: fab name:%llx " + "vfid:%d map:%x\n", + fip->sel_fcf->fabric_name, fip->sel_fcf->vfid, + fip->sel_fcf->fc_map); + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + return 0; + + els_len = dlen - sizeof(*els); + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + els_dtype = desc->fip_dtype; + + if (!fh) + return 0; + + /* + * ELS command code, reason and explanation should be = Reject, + * unsupported command and insufficient resource + */ + els_op = *(u8 *)(fh + 1); + if (els_op == ELS_LS_RJT) { + shost_printk(KERN_INFO, lport->host, + "Flogi Request Rejected by Switch\n"); + return 1; + } + shost_printk(KERN_INFO, lport->host, + "Flogi Request Accepted by Switch\n"); + } + return 0; +} + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct sk_buff *skb; + char *eth_fr; + int fr_len; + struct fip_vlan *vlan; + u64 vlan_tov; + + fnic_fcoe_reset_vlans(fnic); + fnic->set_vlan(fnic, 0); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Sending VLAN request...\n"); + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + fr_len = sizeof(*vlan); + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + 
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + /* set a timer so that we can retry if there no response */ + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); +} + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fip_header *fiph; + struct fip_desc *desc; + u16 vid; + size_t rlen; + size_t dlen; + struct fcoe_vlan *vlan; + u64 sol_time; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response...\n"); + + fiph = (struct fip_header *) skb->data; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", + ntohs(fiph->fip_op), fiph->fip_subcode); + + rlen = ntohs(fiph->fip_dl_len) * 4; + fnic_fcoe_reset_vlans(fnic); + spin_lock_irqsave(&fnic->vlans_lock, flags); + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + shost_printk(KERN_INFO, fnic->lport->host, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kmalloc(sizeof(*vlan), + GFP_ATOMIC); + if (!vlan) { + /* retry from timer */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + memset(vlan, 0, sizeof(struct fcoe_vlan)); + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlans); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* any VLAN descriptors present ? 
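+ * If none were reported, the fip_timer armed when the VLAN request was sent fires later and restarts VLAN discovery.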
*/ + if (list_empty(&fnic->vlans)) { + /* retry from timer */ + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "No VLAN descriptors in FIP VLAN response\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + goto out; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(fip); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +out: + return; +} + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count = 1; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(&fnic->ctlr); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +} + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) +{ + unsigned long flags; + struct fcoe_vlan *fvlan; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; + } + + fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + if (fvlan->state == FIP_VLAN_USED) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + + if (fvlan->state == FIP_VLAN_SENT) { + fvlan->state = FIP_VLAN_USED; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; +} + +static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) +{ + struct fnic_event *fevt; + unsigned long flags; + + fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); + if (!fevt) + return; + + fevt->fnic = fnic; + fevt->event = ev; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fevt->list, &fnic->evlist); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_work(&fnic->event_work); +} + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +{ + struct fip_header *fiph; + int ret = 1; + u16 op; + u8 sub; + + if (!skb || !(skb->data)) + return -1; + + if (skb_linearize(skb)) + goto drop; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { + if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) + goto drop; + /* pass it on to fcoe */ + ret = 1; + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) { + /* set the vlan as used */ + fnic_fcoe_process_vlan_resp(fnic, skb); + ret = 0; + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + /* received CVL request, restart vlan disc */ + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + /* pass it on to fcoe */ + ret = 1; + } +drop: + return ret; +} + +void fnic_handle_fip_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + unsigned long flags; + struct sk_buff *skb; + struct ethhdr *eh; + + while ((skb = 
skb_dequeue(&fnic->fip_frame_queue))) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->fip_frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + eh = (struct ethhdr *)skb->data; + if (eh->h_proto == htons(ETH_P_FIP)) { + skb_pull(skb, sizeof(*eh)); + if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { + dev_kfree_skb(skb); + continue; + } + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { + shost_printk(KERN_INFO, fnic->lport->host, + "Trigger a Link down - VLAN Disc\n"); + fcoe_ctlr_link_down(&fnic->ctlr); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + dev_kfree_skb(skb); + continue; + } + fcoe_ctlr_recv(&fnic->ctlr, skb); + continue; + } + } +} + /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. @@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) skb_reset_mac_header(skb); } if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - fcoe_ctlr_recv(&fnic->ctlr, skb); + if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { + printk(KERN_ERR "Dropped FIP frame, as firmware " + "uses non-FIP mode, Enable FIP " + "using UCSM\n"); + goto drop; + } + skb_queue_tail(&fnic->fip_frame_queue, skb); + queue_work(fnic_fip_queue, &fnic->fip_frame_work); return 1; /* let caller know packet was used */ } if (eh->h_proto != htons(ETH_P_FCOE)) @@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) dev_kfree_skb(fp_skb(fp)); buf->os_buf = NULL; } + +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fcoe_vlan *next; + + /* + * indicate a link down to fcoe so that all fcf's are free'd + * might not be required since we did this before sending vlan + * discovery request + */ + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlans)) { + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); +} + +void fnic_handle_fip_timer(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->ctlr.mode == FIP_ST_NON_FIP) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + /* no vlans available, try again */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + shost_printk(KERN_DEBUG, fnic->lport->host, + "fip_timer: vlan %d state %d sol_count %d\n", + vlan->vid, vlan->state, vlan->sol_count); + switch (vlan->state) { + case FIP_VLAN_USED: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "FIP VLAN 
is selected for FC transaction\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + break; + case FIP_VLAN_FAILED: + /* if all vlans are in failed state, restart vlan disc */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + break; + case FIP_VLAN_SENT: + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + shost_printk(KERN_INFO, fnic->lport->host, + "Dequeue this VLAN ID %d from list\n", + vlan->vid); + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlans)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + shost_printk(KERN_INFO, fnic->lport->host, + "fip_timer: vlan list empty, " + "trigger vlan disc\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + /* check the next vlan */ + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + vlan->sol_count++; + sol_time = jiffies + msecs_to_jiffies + (FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + break; + } +} diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h new file mode 100644 index 000000000000..87e74c2ab971 --- /dev/null +++ b/drivers/scsi/fnic/fnic_fip.h @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _FNIC_FIP_H_ +#define _FNIC_FIP_H_ + + +#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */ +#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ +#define FCOE_CTLR_MAX_SOL 8 + +#define FINC_MAX_FLOGI_REJECTS 8 + +/* + * FIP_DT_VLAN descriptor. + */ +struct fip_vlan_desc { + struct fip_desc fd_desc; + __be16 fd_vlan; +} __attribute__((packed)); + +struct vlan { + __be16 vid; + __be16 type; +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + u16 vid; /* vlan ID */ + u16 sol_count; /* no. 
of sols sent */ + u16 state; /* state */ +}; + +enum fip_vlan_state { + FIP_VLAN_AVAIL = 0, /* don't do anything */ + FIP_VLAN_SENT = 1, /* sent */ + FIP_VLAN_USED = 2, /* succeed */ + FIP_VLAN_FAILED = 3, /* failed to response */ +}; + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +#endif /* __FINC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index d601ac543c52..5f09d1814d26 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -39,6 +39,7 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" +#include "fnic_fip.h" #include "fnic.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } +static void fnic_fip_notify_timer(unsigned long data) +{ + struct fnic *fnic = (struct fnic *)data; + + fnic_handle_fip_timer(fnic); +} + static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport) return fnic->data_src_addr; } +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +{ + u16 old_vlan; + old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} + static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *host; @@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + fnic->set_vlan = fnic_set_vlan; fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); + setup_timer(&fnic->fip_timer, fnic_fip_notify_timer, + (unsigned long)fnic); + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); + INIT_WORK(&fnic->event_work, fnic_handle_event); + skb_queue_head_init(&fnic->fip_frame_queue); + spin_lock_irqsave(&fnic_list_lock, flags); + if (!fnic_fip_queue) { + fnic_fip_queue = + create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + spin_unlock_irqrestore(&fnic_list_lock, flags); + printk(KERN_ERR PFX "fnic FIP work queue " + "create failed\n"); + err = -ENOMEM; + goto err_out_free_max_pool; + } + } + spin_unlock_irqrestore(&fnic_list_lock, flags); + INIT_LIST_HEAD(&fnic->evlist); + INIT_LIST_HEAD(&fnic->vlans); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); @@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev) skb_queue_purge(&fnic->frame_queue); skb_queue_purge(&fnic->tx_queue); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + del_timer_sync(&fnic->fip_timer); + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + fnic_fcoe_evlist_free(fnic); + } + /* * Log off the fabric. This stops all remote ports, dns port, * logs off the fabric. 
This flushes all rport, disc, lport work @@ -889,8 +932,8 @@ static int __init fnic_init_module(void) len = sizeof(struct fnic_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, - SLAB_HWCACHE_ALIGN, - NULL); + SLAB_HWCACHE_ALIGN, + NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); err = -ENOMEM; @@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void) { pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); + if (fnic_fip_queue) { + flush_workqueue(fnic_fip_queue); + destroy_workqueue(fnic_fip_queue); + } kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index b576be734e2e..9795d6f3e197 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg) return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); } +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan) +{ + u64 a0 = new_default_vlan, a1 = 0; + int wait = 1000; + int old_vlan = 0; + + old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait); + return (u16)old_vlan; +} + int vnic_dev_link_status(struct vnic_dev *vdev) { if (vdev->linkstatus) diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h index f9935a8a5a09..40d4195f562b 100644 --- a/drivers/scsi/fnic/vnic_dev.h +++ b/drivers/scsi/fnic/vnic_dev.h @@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev); int vnic_dev_open(struct vnic_dev *vdev, int arg); int vnic_dev_open_done(struct vnic_dev *vdev, int *done); int vnic_dev_init(struct vnic_dev *vdev, int arg); +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, + u16 new_default_vlan); int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h index 7c9ccbd4134b..3e2fcbda6aed 100644 --- a/drivers/scsi/fnic/vnic_devcmd.h +++ b/drivers/scsi/fnic/vnic_devcmd.h @@ -196,6 +196,73 @@ enum vnic_devcmd_cmd { /* undo initialize of virtual link */ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (u64)a0=paddr of arg + * (u32)a1=CMD_PERBI_XXX */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (u16)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + 
* a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=size of buffer specified in a0. + * out: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. */ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* + * INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* + * Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46) }; /* flags for CMD_OPEN */ diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index cc82d0f322b6..4e31caa21ddf 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) return 0; } - if (vhost->state == IBMVFC_ACTIVE) { + if (vhost->logged_in) { evt = ibmvfc_get_event(vhost); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); @@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) tmf->common.length = sizeof(*tmf); tmf->scsi_id = rport->port_id; int_to_scsilun(sdev->lun, &tmf->lun); - tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS)) + type &= ~IBMVFC_TMF_SUPPRESS_ABTS; + if (vhost->state == IBMVFC_ACTIVE) + tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + else + tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID); tmf->cancel_key = (unsigned long)sdev->hostdata; tmf->my_cancel_key = (unsigned long)starget->hostdata; @@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) timeout = wait_for_completion_timeout(&evt->comp, timeout); if (!timeout) { - rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + rc = ibmvfc_cancel_all(sdev, 0); if (!rc) { rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); if (rc == SUCCESS) @@ -2383,24 +2388,30 @@ out: * @cmd: scsi command to abort * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, abort_rc; + int cancel_rc, block_rc; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - abort_rc = ibmvfc_abort_task_set(sdev); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + ibmvfc_abort_task_set(sdev); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); - if (!cancel_rc && !abort_rc) + if (!cancel_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2410,29 +2421,47 @@ static int 
ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, reset_rc; + int cancel_rc, block_rc, reset_rc = 0; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } /** + * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); +} + +/** * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function * @sdev: scsi device struct * @data: return code @@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); - int reset_rc; + int block_rc; + int reset_rc = 0; int rc = FAILED; unsigned long cancel_rc = 0; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + if (block_rc != FAST_IO_FAIL) { + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + } else + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { - int rc; + int rc, block_rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); + + if (block_rc == FAST_IO_FAIL) + return FAST_IO_FAIL; + return rc ? 
FAILED : SUCCESS; } @@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) dev_rport = starget_to_rport(scsi_target(sdev)); if (dev_rport != rport) continue; - ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - ibmvfc_abort_task_set(sdev); + ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); } rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3be8af624e6f..017a5290e8c1 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.10" -#define IBMVFC_DRIVER_DATE "(August 24, 2012)" +#define IBMVFC_DRIVER_VERSION "1.0.11" +#define IBMVFC_DRIVER_DATE "(April 12, 2013)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp { u16 error; u32 flags; #define IBMVFC_NATIVE_FC 0x01 -#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; u64 capabilities; #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 +#define IBMVFC_CAN_SUPPRESS_ABTS 0x10 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -351,6 +351,7 @@ struct ibmvfc_tmf { #define IBMVFC_TMF_LUN_RESET 0x10 #define IBMVFC_TMF_TGT_RESET 0x20 #define IBMVFC_TMF_LUA_VALID 0x40 +#define IBMVFC_TMF_SUPPRESS_ABTS 0x80 u32 cancel_key; u32 my_cancel_key; u32 pad; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 2197b57fb225..82a3c1ec8706 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd) ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (!ioa_cfg->in_reset_reload) { + if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); dev_err(&ioa_cfg->pdev->dev, "Adapter being reset as a result of error recovery.\n"); @@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, { u32 ioadl_flags = 0; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; struct ipr_ioadl64_desc *last_ioadl64 = NULL; int len = qc->nbytes; struct scatterlist *sg; @@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); ioarcb->u.sis64_addr_data.data_ioadl_addr = - cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); for_each_sg(qc->sg, sg, qc->n_elem, si) { ioadl64->flags = cpu_to_be32(ioadl_flags); @@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int i; ENTER; if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { @@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) ioa_cfg->in_reset_reload = 0; ioa_cfg->reset_retries = 0; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); wake_up_all(&ioa_cfg->reset_wait_q); LEAVE; @@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct 
pci_dev *pdev) spin_lock_irqsave(ioa_cfg->host->host_lock, flags); if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; - ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; ioa_cfg->in_ioa_bringdown = 1; for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 21a6ff1ed5c6..a1fb840596ef 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */ u8 hob_lbam; u8 hob_lbah; u8 ctl; -}__attribute__ ((packed, aligned(4))); +}__attribute__ ((packed, aligned(2))); struct ipr_ioadl_desc { __be32 flags_and_data_len; diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index c3aa6c5457b9..96a26f454673 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s struct isci_host *ihost = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); @@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 7674caae1d88..47a013fffae7 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte static inline bool dev_is_expander(struct domain_device *dev) { - return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; + return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE; } static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 9594ab62702b..e3e3bcbd5a9f 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) /* all unaccelerated request types (non ssp or ncq) handled with * substates */ - if (!task && dev->dev_type == SAS_END_DEV) { + if (!task && dev->dev_type == SAS_END_DEVICE) { state = SCI_REQ_TASK_WAIT_TC_COMP; } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { state = SCI_REQ_SMP_WAIT_RESP; @@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost, if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) /* pass */; else if (dev_is_sata(dev)) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); @@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, /* Build the common part of the request */ 
sci_general_request_construct(ihost, idev, ireq); - if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { + if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { set_bit(IREQ_TMF, &ireq->flags); memset(ireq->tc, 0, sizeof(struct scu_task_context)); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index b6f19a1db780..9bb020ac089c 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, } /* XXX convert to get this from task->tproto like other drivers */ - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { isci_tmf->proto = SAS_PROTOCOL_SSP; status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index bdb81cda8401..161c98efade9 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) if (phy->attached_tproto & SAS_PROTOCOL_STP) dev->tproto = phy->attached_tproto; if (phy->attached_sata_dev) - dev->tproto |= SATA_DEV; + dev->tproto |= SAS_SATA_DEV; - if (phy->attached_dev_type == SATA_PENDING) - dev->dev_type = SATA_PENDING; + if (phy->attached_dev_type == SAS_SATA_PENDING) + dev->dev_type = SAS_SATA_PENDING; else { int res; - dev->dev_type = SATA_DEV; + dev->dev_type = SAS_SATA_DEV; res = sas_get_report_phy_sata(dev->parent, phy->phy_id, &dev->sata_dev.rps_resp); if (res) { @@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) int res; /* we weren't pending, so successfully end the reset sequence now */ - if (dev->dev_type != SATA_PENDING) + if (dev->dev_type != SAS_SATA_PENDING) return 1; /* hmmm, if this succeeds do we need to repost the domain_device to the @@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link) return 0; switch (ex_phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: return 0; - case SAS_END_DEV: + case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); default: @@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev) struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; - if (dev->dev_type == SATA_PENDING) + if (dev->dev_type == SAS_SATA_PENDING) return; if ((fis->sector_count == 1 && /* ATA */ @@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev) { int res; - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) return -ENODEV; sas_get_ata_command_set(dev); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index a0c3003e0c7d..62b58d38ce2e 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -39,11 +39,11 @@ void sas_init_dev(struct domain_device *dev) { switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: INIT_LIST_HEAD(&dev->ex_dev.children); mutex_init(&dev->ex_dev.cmd_mutex); break; @@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port) if (fis->interrupt_reason == 1 && fis->lbal == 1 && fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 && (fis->device & ~0x10) == 0) - dev->dev_type = SATA_PM; + dev->dev_type = SAS_SATA_PM; else - dev->dev_type = 
SATA_DEV; + dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; } else { struct sas_identify_frame *id = @@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->port = port; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: rc = sas_ata_init(dev); if (rc) { rphy = NULL; break; } /* fall through */ - case SAS_END_DEV: + case SAS_END_DEVICE: rphy = sas_end_device_alloc(port->port); break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->rphy = rphy; get_device(&dev->rphy->dev); - if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) + if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE) list_add_tail(&dev->disco_list_node, &port->disco_list); else { spin_lock_irq(&port->dev_list_lock); @@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref) dev->phy = NULL; /* remove the phys and ports, everything else should be gone */ - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) kfree(dev->ex_dev.ex_phy); if (dev_is_sata(dev) && dev->sata_dev.ap) { @@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d spin_unlock_irq(&port->dev_list_lock); spin_lock_irq(&ha->lock); - if (dev->dev_type == SAS_END_DEV && + if (dev->dev_type == SAS_END_DEVICE && !list_empty(&dev->ssp_dev.eh_list_node)) { list_del_init(&dev->ssp_dev.eh_list_node); ha->eh_active--; @@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work) task_pid_nr(current)); switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: error = sas_discover_end_dev(dev); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: error = sas_discover_root_expander(dev); break; - case SATA_DEV: - case SATA_PM: + case SAS_SATA_DEV: + case SAS_SATA_PM: #ifdef CONFIG_SCSI_SAS_ATA error = sas_discover_sata(dev); break; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f42b0e15410f..446b85110a1f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) } } -static enum sas_dev_type to_dev_type(struct discover_resp *dr) +static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ - if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && + if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) - return SATA_PENDING; + return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; @@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, 
SAS_ADDR_SIZE); - phy->attached_dev_type = NO_DEVICE; + phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; @@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* help some expanders that fail to zero sas_address in the 'no * device' case */ - if (phy->attached_dev_type == NO_DEVICE || + if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else @@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) out: switch (phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: type = "stp pending"; break; - case NO_DEVICE: + case SAS_PHY_UNUSED: type = "no device"; break; - case SAS_END_DEV: + case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; @@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) type = "ssp"; } break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: @@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev( } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { - child->dev_type = SAS_END_DEV; + child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) @@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander( switch (phy->attached_dev_type) { - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); - if (ex_phy->attached_dev_type == NO_DEVICE) { + if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); @@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; - if (ex_phy->attached_dev_type != SAS_END_DEV && - ex_phy->attached_dev_type != FANOUT_DEV && - ex_phy->attached_dev_type != EDGE_DEV && - ex_phy->attached_dev_type != SATA_PENDING) { + if (ex_phy->attached_dev_type != SAS_END_DEVICE && + ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), @@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } switch (ex_phy->attached_dev_type) { - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", @@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct 
domain_device *dev, int phy_id) memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: @@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == EDGE_DEV || - phy->attached_dev_type == FANOUT_DEV) && + if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); @@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev) u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { - if (child->dev_type != EDGE_DEV && - child->dev_type != FANOUT_DEV) + if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); @@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) int i; u8 *sub_sas_addr = NULL; - if (dev->dev_type != EDGE_DEV) + if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { @@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == FANOUT_DEV || - phy->attached_dev_type == EDGE_DEV) && + if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) @@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *child_phy) { static const char *ex_type[] = { - [EDGE_DEV] = "edge", - [FANOUT_DEV] = "fanout", + [SAS_EDGE_EXPANDER_DEVICE] = "edge", + [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; @@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child) if (!child->parent) return 0; - if (child->parent->dev_type != EDGE_DEV && - child->parent->dev_type != FANOUT_DEV) + if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; @@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child) child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { - case EDGE_DEV: - if (child->dev_type == FANOUT_DEV) { + case SAS_EDGE_EXPANDER_DEVICE: + if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child) } } break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { - if (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == 
SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); @@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev, } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, - u8 *sas_addr, enum sas_dev_type *type) + u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; @@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { - if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { + if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; @@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); @@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); @@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root, int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) { + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); @@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = sas_discover_bfs_by_root(child); break; } @@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) return res; } -static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) +static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went - * SAS_END_DEV to SATA_PENDING the link needs recovery + * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ - if ((old == SATA_PENDING && new == SAS_END_DEV) || - (old == SAS_END_DEV && new == SATA_PENDING)) + if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || + (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; @@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; - enum sas_dev_type type = NO_DEVICE; + enum 
sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; @@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) sas_ex_phy_discover(dev, phy_id); - if (ata_dev && phy->attached_dev_type == SATA_PENDING) + if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 1de67964e5a1..7e7ba83f0a21 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev, rphy->identify.initiator_port_protocols = dev->iproto; rphy->identify.target_port_protocols = dev->tproto; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: /* FIXME: need sata device type */ - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: rphy->identify.device_type = SAS_END_DEVICE; break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; break; default: diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 1398b714c018..d3c5297c6c89 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) continue; } - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { dev->ex_dev.ex_change_count = -1; for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 7706c99ec8bb..bcc56cac4fd8 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -46,10 +46,15 @@ struct lpfc_sli2_slim; #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi cmnd for menlo needs nearly twice as for firmware downloads using bsg */ -#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ + +#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ +#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ +#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ + #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ -#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ @@ -66,8 +71,10 @@ struct lpfc_sli2_slim; * queue depths when there are driver resource error or Firmware * resource error. 
*/ -#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ -#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ +/* 1 Second */ +#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) +/* 5 minutes */ +#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300)) /* Number of exchanges reserved for discovery to complete */ #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -671,6 +678,7 @@ struct lpfc_hba { uint32_t lmt; uint32_t fc_topology; /* link topology, from LINK INIT */ + uint32_t fc_topology_changed; /* link topology, from LINK INIT */ struct lpfc_stats fc_stat; @@ -701,9 +709,11 @@ struct lpfc_hba { uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; + uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_wq_count; uint32_t cfg_fcp_eq_count; uint32_t cfg_fcp_io_channel; + uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -804,8 +814,10 @@ struct lpfc_hba { uint64_t bg_reftag_err_cnt; /* fastpath list. */ - spinlock_t scsi_buf_list_lock; - struct list_head lpfc_scsi_buf_list; + spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ + spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ + struct list_head lpfc_scsi_buf_list_get; + struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 9290713af253..3c5625b8b1f4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int i; int rc; + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + return 0; + init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); @@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba) int status = 0; int rc; - if (!phba->cfg_enable_hba_reset) + if ((!phba->cfg_enable_hba_reset) || + (phba->pport->fc_flag & FC_OFFLINE_MODE)) return -EACCES; status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); @@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) @@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3054 lpfc_topology changed from %d to %d\n", prev_val, val); + if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) + phba->fc_topology_changed = 1; err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_topology = prev_val; @@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, lpfc_fcp_imax_show, lpfc_fcp_imax_store); +/** + * lpfc_state_show - Display current driver CPU affinity + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains text describing the state of the link. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vector_map_info *cpup; + int idx, len = 0; + + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->intr_type != MSIX)) + return len; + + switch (phba->cfg_fcp_cpu_map) { + case 0: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: No mapping (%d)\n", + phba->cfg_fcp_cpu_map); + return len; + case 1: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: HBA centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + case 2: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: Driver centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + } + + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id); + else + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d IRQ %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id, cpup->irq); + + cpup++; + } + return len; +} + +/** + * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: one or more lpfc_polling_flags values. + * @count: not used. + * + * Returns: + * -EINVAL - Not implemented yet. + **/ +static ssize_t +lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int status = -EINVAL; + return status; +} + +/* +# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors +# for the HBA. +# +# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2). +# 0 - Do not affinitze IRQ vectors +# 1 - Affintize HBA vectors with respect to each HBA +# (start with CPU0 for each HBA) +# 2 - Affintize HBA vectors with respect to the entire driver +# (round robin thru all CPUs across all HBAs) +*/ +static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; +module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_fcp_cpu_map, + "Defines how to map CPUs to IRQ vectors per HBA"); + +/** + * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable + * @phba: lpfc_hba pointer. + * @val: link speed value. + * + * Description: + * If val is in a valid range [0-2], then affinitze the adapter's + * MSIX vectors. + * + * Returns: + * zero if val saved. + * -EINVAL val out of range + **/ +static int +lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) +{ + if (phba->sli_rev != LPFC_SLI_REV4) { + phba->cfg_fcp_cpu_map = 0; + return 0; + } + + if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) { + phba->cfg_fcp_cpu_map = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3326 fcp_cpu_map: %d out of range, using default\n", + val); + phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; + + return 0; +} + +static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR, + lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store); + /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3. 
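The new lpfc_fcp_cpu_map_init() above follows the usual pattern for lpfc module parameters: accept the value when it lies in [LPFC_MIN_CPU_MAP, LPFC_MAX_CPU_MAP], otherwise emit log message 3326 and fall back to LPFC_DRIVER_CPU_MAP. A minimal userspace sketch of that clamp-or-default logic follows; it reuses only the constant values added to lpfc_hw4.h elsewhere in this patch, while the helper name and the main() harness are hypothetical stand-ins, not driver code.

/*
 * Standalone illustration of the clamp-or-default validation used by
 * lpfc_fcp_cpu_map_init(); not part of the patch.
 */
#include <stdio.h>

#define LPFC_MIN_CPU_MAP	0
#define LPFC_MAX_CPU_MAP	2
#define LPFC_DRIVER_CPU_MAP	2	/* default */

/* Return the requested mapping mode if valid, else the driver default. */
static int fcp_cpu_map_init(int requested)
{
	if (requested >= LPFC_MIN_CPU_MAP && requested <= LPFC_MAX_CPU_MAP)
		return requested;

	fprintf(stderr, "fcp_cpu_map: %d out of range, using default %d\n",
		requested, LPFC_DRIVER_CPU_MAP);
	return LPFC_DRIVER_CPU_MAP;
}

int main(void)
{
	printf("cfg_fcp_cpu_map = %d\n", fcp_cpu_map_init(1));	/* kept as-is */
	printf("cfg_fcp_cpu_map = %d\n", fcp_cpu_map_init(7));	/* falls back to 2 */
	return 0;
}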
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); # 0 = disabled (default) # 1 = enabled # Value range is [0,1]. Default value is 0. +# +# This feature in under investigation and may be supported in the future. */ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; -module_param(lpfc_fcp_look_ahead, uint, S_IRUGO); -MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions"); - /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the @@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery, /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count - * This value can be set to values between 64 and 256. The default value is + * This value can be set to values between 64 and 4096. The default value is * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). + * Because of the additional overhead involved in setting up T10-DIF, + * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 + * and will be limited to 512 if BlockGuard is enabled under SLI3. */ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); -LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, - LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, - "Max Protection Scatter Gather Segment Count"); +/* + * This parameter will be depricated, the driver cannot limit the + * protection data s/g list. + */ +LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, + LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, + "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, @@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_wq_count, &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_fcp_io_channel, @@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 888666892004..094be2cad65b 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, unsigned int transfer_bytes, bytes_copied = 0; unsigned int sg_offset, dma_offset; unsigned char *dma_address, *sg_address; - struct scatterlist *sgel; LIST_HEAD(temp_list); - + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + bool sg_valid; list_splice_init(&dma_buffers->list, &temp_list); list_add(&dma_buffers->list, &temp_list); sg_offset = 0; - sgel = bsg_buffers->sg_list; + if (to_buffers) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, + sg_flags); + local_irq_save(flags); + sg_valid = sg_miter_next(&miter); list_for_each_entry(mp, &temp_list, list) { dma_offset = 0; - while (bytes_to_transfer && sgel && + while (bytes_to_transfer && sg_valid && (dma_offset < LPFC_BPL_SIZE)) { dma_address = mp->virt 
+ dma_offset; if (sg_offset) { /* Continue previous partial transfer of sg */ - sg_address = sg_virt(sgel) + sg_offset; - transfer_bytes = sgel->length - sg_offset; + sg_address = miter.addr + sg_offset; + transfer_bytes = miter.length - sg_offset; } else { - sg_address = sg_virt(sgel); - transfer_bytes = sgel->length; + sg_address = miter.addr; + transfer_bytes = miter.length; } if (bytes_to_transfer < transfer_bytes) transfer_bytes = bytes_to_transfer; @@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, sg_offset += transfer_bytes; bytes_to_transfer -= transfer_bytes; bytes_copied += transfer_bytes; - if (sg_offset >= sgel->length) { + if (sg_offset >= miter.length) { sg_offset = 0; - sgel = sg_next(sgel); + sg_valid = sg_miter_next(&miter); } } } + sg_miter_stop(&miter); + local_irq_restore(flags); list_del_init(&dma_buffers->list); list_splice(&temp_list, &dma_buffers->list); return bytes_copied; @@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) cmdiocbq->context1 = dd_data; cmdiocbq->context2 = cmp; cmdiocbq->context3 = bmp; + cmdiocbq->context_un.ndlp = ndlp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; @@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, ctiocb->context1 = dd_data; ctiocb->context2 = cmp; ctiocb->context3 = bmp; + ctiocb->context_un.ndlp = ndlp; ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; dd_data->type = TYPE_IOCB; @@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? 
-EINTR : -ETIMEDOUT; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 7631893ae005..d41456e5f814 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); +void lpfc_sli4_offline_eratt(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 7bff3a19af56..ae1a07c57cae 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) if (init_utsname()->nodename[0] != '\0') lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, jiffies + + msecs_to_jiffies(1000 * 60)); } return; } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index bbed8471bf0b..3cae0a92e8bd 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -29,6 +29,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.remoteID = did; /* DID */ icmd->ulpCommand = CMD_ELS_REQUEST64_CR; - icmd->ulpTimeout = phba->fc_ratov * 2; + if (elscmd == ELS_CMD_FLOGI) + icmd->ulpTimeout = FF_DEF_RATOV * 2; + else + icmd->ulpTimeout = phba->fc_ratov * 2; } else { icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); @@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, /* Xmit ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " - "NPORT x%x I/O tag: x%x, port state: x%x\n", + "NPORT x%x I/O tag: x%x, port state:x%x" + " fc_flag:x%x\n", elscmd, did, elsiocb->iotag, - vport->port_state); + vport->port_state, + vport->fc_flag); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0117 Xmit ELS response x%x to remote " - "NPORT x%x I/O tag: x%x, size: x%x\n", + "NPORT x%x I/O tag: x%x, size: x%x " + "port_state x%x fc_flag x%x\n", elscmd, ndlp->nlp_DID, elsiocb->iotag, - cmdSize); + cmdSize, vport->port_state, + vport->fc_flag); } return elsiocb; @@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + /* The FC_VFI_REGISTERED flag will get clear in the cmpl + * handler for unreg_vfi, but if we don't force the + * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be + * built with the update bit set instead of just the vp bit to + * change the Nport ID. We need to have the vp set and the + * Upd cleared on topology changes. 
+ */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } /* Start discovery - this should just do CLEAR_LA */ lpfc_disc_start(vport); @@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi: vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; if ((phba->sli_rev == LPFC_SLI_REV4) && (!(vport->fc_flag & FC_VFI_REGISTERED) || - (vport->fc_prevDID != vport->fc_myDID))) { - if (vport->fc_flag & FC_VFI_REGISTERED) - lpfc_sli4_unreg_all_rpis(vport); + (vport->fc_prevDID != vport->fc_myDID) || + phba->fc_topology_changed)) { + if (vport->fc_flag & FC_VFI_REGISTERED) { + if (phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } else { + lpfc_sli4_unreg_all_rpis(vport); + } + } lpfc_issue_reg_vfi(vport); lpfc_nlp_put(ndlp); goto out; @@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi: /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0101 FLOGI completes successfully " - "Data: x%x x%x x%x x%x\n", + "0101 FLOGI completes successfully, I/O tag:x%x, " + "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, - sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, + vport->port_state, vport->fc_flag); if (vport->port_state == LPFC_FLOGI) { /* @@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; uint32_t cmd, did; int rc; + uint32_t fc_flag = 0; + uint32_t port_state = 0; cmd = *lp++; sp = (struct serv_parm *) lp; @@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * will be. */ vport->fc_myDID = PT2PT_LocalID; - } + } else + vport->fc_myDID = PT2PT_RemoteID; /* * The vport state should go to LPFC_FLOGI only * AFTER we issue a FLOGI, not receive one. 
*/ spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->port_state = LPFC_FLOGI; spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); /* * We temporarily set fc_myDID to make it look like we are @@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) } if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); } /** @@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0112 ELS command x%x received from NPORT x%x " - "Data: x%x\n", cmd, did, vport->port_state); + "Data: x%x x%x x%x x%x\n", + cmd, did, vport->port_state, vport->fc_flag, + vport->fc_myDID, vport->fc_prevDID); switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPLOGI++; ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->pport->fc_flag & FC_PT2PT)) { + vport->fc_prevDID = vport->fc_myDID; + /* Our DID needs to be updated before registering + * the vfi. This is done in lpfc_rcv_plogi but + * that is called after the reg_vfi. + */ + vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3312 Remote port assigned DID x%x " + "%x\n", vport->fc_myDID, + vport->fc_prevDID); + } lpfc_send_els_event(vport, ndlp, payload); @@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } + shost = lpfc_shost_from_vport(vport); if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * another NPort and the other side has initiated * the PLOGI before responding to our FLOGI. 
*/ + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->fc_topology_changed || + vport->fc_myDID != vport->fc_prevDID)) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } } - shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; spin_unlock_irq(shost->host_lock); @@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_DISC_DELAYED) { spin_unlock_irq(shost->host_lock); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "3334 Delay fc port discovery for %d seconds\n", + phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, - jiffies + HZ * phba->fc_ratov); + jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); return; } spin_unlock_irq(shost->host_lock); @@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; shost = lpfc_shost_from_vport(phba->pport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) - mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); + mod_timer(&phba->fabric_block_timer, + jiffies + msecs_to_jiffies(100)); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 326e05a65a73..0f6e2548f35d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (!list_empty(&evtp->evt_listp)) return; + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); @@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - (phba->sli_rev < LPFC_SLI_REV4)) - lpfc_issue_clear_la(phba, phba->pport); return 0; } @@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; - if (phba->pport->port_state != LPFC_FLOGI) { + if (phba->pport->port_state != LPFC_FLOGI && + phba->pport->fc_flag & FC_FABRIC) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); @@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " - "FCF (x%x)\n", - phba->fcf.current_rec.fcf_indx); + "FCF (x%x), port_state:x%x, " + "fc_flag:x%x\n", + phba->fcf.current_rec.fcf_indx, + phba->pport->port_state, + phba->pport->fc_flag); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, @@ -2796,7 +2798,19 @@ void 
lpfc_issue_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; - int rc; + int rc, vpi; + + if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { + vpi = lpfc_alloc_vpi(vport->phba); + if (!vpi) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_MBOX, + "3303 Failed to obtain vport vpi\n"); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->vpi = vpi; + } mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { @@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } - /* If the VFI is already registered, there is nothing else to do */ + /* If the VFI is already registered, there is nothing else to do + * Unless this was a VFI update and we are in PT2PT mode, then + * we should drop through to set the port state to ready. + */ if (vport->fc_flag & FC_VFI_REGISTERED) - goto out_free_mem; + if (!(phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_PT2PT)) + goto out_free_mem; /* The VPI is implicitly registered when the VFI is registered */ spin_lock_irq(shost->host_lock); @@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " + "alpacnt:%d LinkState:%x topology:%x\n", + vport->port_state, vport->fc_flag, vport->fc_myDID, + vport->phba->alpa_map[0], + phba->link_state, phba->fc_topology); + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* * For private loop or for NPort pt2pt, @@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(vport); + if (vport->fc_flag & FC_PT2PT) + vport->port_state = LPFC_VPORT_READY; + else + lpfc_disc_start(vport); } else { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); @@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) break; } + if (phba->fc_topology && + phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3314 Toplogy changed was 0x%x is 0x%x\n", + phba->fc_topology, + bf_get(lpfc_mbx_read_top_topology, la)); + phba->fc_topology_changed = 1; + } + phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; @@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport) tmo, vport->port_state, vport->fc_flag); } - mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); + mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_TMO; spin_unlock_irq(shost->host_lock); @@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport) uint32_t clear_la_pending; int did_changed; - if (!lpfc_is_link_up(phba)) + if (!lpfc_is_link_up(phba)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3315 Link is not up %x\n", + phba->link_state); return; + } if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; @@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport) if (num_sent) return; - /* Register the VPI for SLI3, NON-NPIV only. */ + /* Register the VPI for SLI3, NPIV only. 
*/ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } @@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (vport->cfg_fdmi_on == 1) lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, + jiffies + msecs_to_jiffies(1000 * 60)); /* decrement the node reference count held for this callback * function. @@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - int i, rc; + int i = 0, rc; /* Unregister RPIs */ if (lpfc_fcf_inuse(phba)) @@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cleanup_pending_mbox(phba->pport); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(phba->pport); + lpfc_mbx_unreg_vpi(phba->pport); + shost = lpfc_shost_from_vport(phba->pport); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index e8c476031703..83700c18f468 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type { #define BG_OP_IN_CSUM_OUT_CSUM 0x5 #define BG_OP_IN_CRC_OUT_CSUM 0x6 #define BG_OP_IN_CSUM_OUT_CRC 0x7 +#define BG_OP_RAW_MODE 0x8 struct lpfc_pde5 { uint32_t word0; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1dd2f6f0a127..713a4613ec3a 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -200,6 +200,11 @@ struct lpfc_sli_intf { #define LPFC_MAX_IMAX 5000000 #define LPFC_DEF_IMAX 50000 +#define LPFC_MIN_CPU_MAP 0 +#define LPFC_MAX_CPU_MAP 2 +#define LPFC_HBA_CPU_MAP 1 +#define LPFC_DRIVER_CPU_MAP 2 /* Default */ + /* PORT_CAPABILITIES constants. 
*/ #define LPFC_MAX_SUPPORTED_PAGES 8 @@ -621,7 +626,7 @@ struct lpfc_register { #define lpfc_sliport_status_rdy_SHIFT 23 #define lpfc_sliport_status_rdy_MASK 0x1 #define lpfc_sliport_status_rdy_WORD word0 -#define MAX_IF_TYPE_2_RESETS 1000 +#define MAX_IF_TYPE_2_RESETS 6 #define LPFC_CTL_PORT_CTL_OFFSET 0x408 #define lpfc_sliport_ctrl_end_SHIFT 30 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 90b8b0515e23..cb465b253910 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/firmware.h> #include <linux/miscdevice.h> +#include <linux/percpu.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -58,6 +59,9 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; +/* Used when mapping IRQ vectors in a driver centric manner */ +uint16_t lpfc_used_cpu[LPFC_MAX_CPU]; + static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); @@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ - mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; } - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); return 0; } @@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !(phba->link_state == LPFC_HBA_ERROR) && !(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); return; } @@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_lock_irq(&phba->pport->work_port_lock); - if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, - jiffies)) { + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (!phba->hb_outstanding) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); else mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } spin_unlock_irq(&phba->pport->work_port_lock); @@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (!pmboxq) { mod_timer(&phba->hb_tmofunc, jiffies + - HZ * 
LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } @@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->mbox_mem_pool); mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } phba->skipped_hb = 0; @@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->skipped_hb = jiffies; mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } else { /* @@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) jiffies_to_msecs(jiffies - phba->last_completion_time)); mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); } } } @@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) * This routine is called to bring a SLI4 HBA offline when HBA hardware error * other than Port Error 6 has been detected. **/ -static void +void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); @@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba) struct lpfc_vport *vport; struct lpfc_vport **vports; int i; + bool vpis_cleared = false; if (!phba) return 0; @@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba) lpfc_unblock_mgmt_io(phba); return 1; } + spin_lock_irq(&phba->hbalock); + if (!phba->sli4_hba.max_cfg_param.vpi_used) + vpis_cleared = true; + spin_unlock_irq(&phba->hbalock); } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if ((vpis_cleared) && + (vports[i]->port_type != + LPFC_PHYSICAL_PORT)) + vports[i]->vpi = 0; + } spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); @@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba) struct lpfc_iocbq *io, *io_next; spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_scsi_bufs maintained by this host. */ - spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { + + spin_lock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, + list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } - spin_unlock(&phba->scsi_buf_list_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); + + spin_lock(&phba->scsi_buf_list_get_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, + list) { + list_del(&sb->list); + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_get_lock); /* Release all the lpfc_iocbq entries maintained by this host. 
*/ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) phba->sli4_hba.scsi_xri_cnt, phba->sli4_hba.scsi_xri_max); - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); + list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { /* max scsi xri shrinked below the allocated scsi buffers */ @@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->dma_handle); kfree(psb); } - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } /* update xris associated to remaining allocated scsi buffers */ @@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); return 0; @@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= 30 * HZ) { + if (time >= msecs_to_jiffies(30 * 1000)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { + if (time >= msecs_to_jiffies(15 * 1000) && + phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " "seconds. Continuing initialization\n"); @@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) goto finished; if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; - if (vport->fc_map_cnt == 0 && time < 2 * HZ) + if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) return -ENOMEM; /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. 
- * 2 segments are added since the IOCB needs a command and response bde. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + /* Initialize the host templates the configured values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a BDE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough BDEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + + /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a BDE for each, and a BDE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + /* Total BDEs in BPL for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; } - /* Also reinitialize the host templates with new values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after config_port mbox */ @@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { + struct lpfc_vector_map_info *cpup; struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; + int rc, i, hbq_count, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; - int longs, sli_family; - int sges_per_segment; + int longs; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - /* With BlockGuard we can have multiple SGEs per Data Segemnt */ - sges_per_segment = 1; - if (phba->cfg_enable_bg) - sges_per_segment = 2; - /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 
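The SLI-3 branch of lpfc_sli_driver_resource_setup() above sizes each command's DMA buffer as one FCP command, one FCP response, and one BDE for each of those plus one BDE per data segment, recording the BDE total in cfg_total_seg_cnt. A small standalone sketch of that arithmetic for the non-BlockGuard case follows; the struct sizes are placeholder assumptions rather than the real lpfc layouts, and the helper name is hypothetical.

/*
 * Illustration of the non-BlockGuard SLI-3 buffer sizing; not part of
 * the patch. Struct sizes are placeholders, not the lpfc definitions.
 */
#include <stdio.h>
#include <stddef.h>

#define SIZEOF_FCP_CMND		32	/* placeholder for sizeof(struct fcp_cmnd) */
#define SIZEOF_FCP_RSP		48	/* placeholder for sizeof(struct fcp_rsp) */
#define SIZEOF_ULP_BDE64	12	/* placeholder for sizeof(struct ulp_bde64) */

/* cmnd + rsp + (data segments plus 2 reserved BDEs: 1 FCP cmnd, 1 FCP rsp) */
static size_t sg_dma_buf_size(unsigned int sg_seg_cnt)
{
	return SIZEOF_FCP_CMND + SIZEOF_FCP_RSP +
	       (size_t)(sg_seg_cnt + 2) * SIZEOF_ULP_BDE64;
}

int main(void)
{
	unsigned int sg_seg_cnt = 64;			/* documented default */
	unsigned int total_seg_cnt = sg_seg_cnt + 2;	/* cfg_total_seg_cnt */

	printf("sg_tablesize:%u dmabuf_size:%zu total_bde:%u\n",
	       sg_seg_cnt, sg_dma_buf_size(sg_seg_cnt), total_seg_cnt);
	return 0;
}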
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (!phba->sli.ring) return -ENOMEM; + /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * It doesn't matter what family our adapter is in, we are + * limited to 2 Pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) + phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; + + /* + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - * To insure that the scsi sgl does not cross a 4k page boundary only - * sgl sizes of must be a power of 2. */ - buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * - sizeof(struct sli4_sge))); - - sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); - max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; - switch (sli_family) { - case LPFC_SLI_INTF_FAMILY_BE2: - case LPFC_SLI_INTF_FAMILY_BE3: - /* There is a single hint for BE - 2 pages per BPL. */ - if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_SLI_HINT1_1) - max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; - break; - case LPFC_SLI_INTF_FAMILY_LNCR_A0: - case LPFC_SLI_INTF_FAMILY_LNCR_B0: - default: - break; + + if (phba->cfg_enable_bg) { + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a SGE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough SGEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need + * to post 1 page for the SGL. + */ } - for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; - dma_buf_size < max_buf_size && buf_size > dma_buf_size; - dma_buf_size = dma_buf_size << 1) - ; - if (dma_buf_size == max_buf_size) - phba->cfg_sg_seg_cnt = (dma_buf_size - - sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - - (2 * sizeof(struct sli4_sge))) / - sizeof(struct sli4_sge); - phba->cfg_sg_dma_buf_size = dma_buf_size; + /* Initialize the host templates with the updated values. 
*/ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ hbq_count = lpfc_sli_hbq_count(); @@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcp_eq_hdl; } + phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu), + GFP_KERNEL); + if (!phba->sli4_hba.cpu_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3327 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + rc = -ENOMEM; + goto out_free_msix; + } + /* Initialize io channels for round robin */ + cpup = phba->sli4_hba.cpu_map; + rc = 0; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = rc; + rc++; + if (rc >= phba->cfg_fcp_io_channel) + rc = 0; + } + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_msix: + kfree(phba->sli4_hba.msix_entries); out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); out_free_fcf_rr_bmask: @@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ + kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_present_cpu = 0; + phba->sli4_hba.num_online_cpu = 0; + /* Free memory allocated for msi-x interrupt vector entries */ kfree(phba->sli4_hba.msix_entries); @@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) init_waitqueue_head(&phba->work_waitq); /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) int cfg_fcp_io_channel; uint32_t cpu; uint32_t i = 0; + uint32_t j = 0; /* @@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) /* Sanity check on HBA EQ parameters */ cfg_fcp_io_channel = phba->cfg_fcp_io_channel; - /* It doesn't make sense to have more io channels then CPUs */ - for_each_online_cpu(cpu) { - i++; + /* It doesn't make sense to have more io channels then online CPUs */ + for_each_present_cpu(cpu) { + if (cpu_online(cpu)) + i++; + j++; } + phba->sli4_hba.num_online_cpu = i; + phba->sli4_hba.num_present_cpu = j; + if (i < cfg_fcp_io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3188 Reducing IO channels to match number of " - "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); + "online CPUs: from %d to %d\n", + cfg_fcp_io_channel, i); cfg_fcp_io_channel = i; } @@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) out: /* Catch the not-ready port failure after a port reset. 
*/ - if (num_resets >= MAX_IF_TYPE_2_RESETS) + if (num_resets >= MAX_IF_TYPE_2_RESETS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3317 HBA not functional: IP Reset Failed " + "after (%d) retries, try: " + "echo fw_reset > board_mode\n", num_resets); rc = -ENODEV; + } return rc; } @@ -8209,6 +8349,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) } /** + * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * @phba: pointer to lpfc hba data structure. + * + * Find next available CPU to use for IRQ to CPU affinity. + */ +static int +lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + + /* + * If we get here, we have used ALL CPUs for the specific + * phys_id. Now we need to clear out lpfc_used_cpu and start + * reusing CPUs. + */ + + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + if (lpfc_used_cpu[cpu] == phys_id) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + } + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + return LPFC_VECTOR_MAP_EMPTY; +} + +/** + * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors + * @phba: pointer to lpfc hba data structure. + * @vectors: number of HBA vectors + * + * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector + * affinization across multple physical CPUs (numa nodes). + * In addition, this routine will assign an IO channel for each CPU + * to use when issuing I/Os. 
+ */ +static int +lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) +{ + int i, idx, saved_chann, used_chann, cpu, phys_id; + int max_phys_id, num_io_channel, first_cpu; + struct lpfc_vector_map_info *cpup; +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo; +#endif + struct cpumask *mask; + uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; + + /* If there is no mapping, just return */ + if (!phba->cfg_fcp_cpu_map) + return 1; + + /* Init cpu_map array */ + memset(phba->sli4_hba.cpu_map, 0xff, + (sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu)); + + max_phys_id = 0; + phys_id = 0; + num_io_channel = 0; + first_cpu = LPFC_VECTOR_MAP_EMPTY; + + /* Update CPU map with physical id and core id of each CPU */ + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +#ifdef CONFIG_X86 + cpuinfo = &cpu_data(cpu); + cpup->phys_id = cpuinfo->phys_proc_id; + cpup->core_id = cpuinfo->cpu_core_id; +#else + /* No distinction between CPUs for other platforms */ + cpup->phys_id = 0; + cpup->core_id = 0; +#endif + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU physid %d coreid %d\n", + cpup->phys_id, cpup->core_id); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + cpup++; + } + + /* Now associate the HBA vectors with specific CPUs */ + for (idx = 0; idx < vectors; idx++) { + cpup = phba->sli4_hba.cpu_map; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) { + + /* Try for all phys_id's */ + for (i = 1; i < max_phys_id; i++) { + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) + continue; + goto found; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3329 Cannot set affinity:" + "Error mapping vector %d (%d)\n", + idx, vectors); + return 0; + } +found: + cpup += cpu; + if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) + lpfc_used_cpu[cpu] = phys_id; + + /* Associate vector with selected CPU */ + cpup->irq = phba->sli4_hba.msix_entries[idx].vector; + + /* Associate IO channel with selected CPU */ + cpup->channel_id = idx; + num_io_channel++; + + if (first_cpu == LPFC_VECTOR_MAP_EMPTY) + first_cpu = cpu; + + /* Now affinitize to the selected CPU */ + mask = &cpup->maskbits; + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. + vector, mask); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3330 Set Affinity: CPU %d channel %d " + "irq %d (%x)\n", + cpu, cpup->channel_id, + phba->sli4_hba.msix_entries[idx].vector, i); + + /* Spread vector mapping across multple physical CPU nodes */ + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + } + + /* + * Finally fill in the IO channel for any remaining CPUs. + * At this point, all IO channels have been assigned to a specific + * MSIx vector, mapped to a specific CPU. + * Base the remaining IO channel assigned, to IO channels already + * assigned to other CPUs on the same phys_id. + */ + for (i = 0; i <= max_phys_id; i++) { + /* + * If there are no io channels already mapped to + * this phys_id, just round robin thru the io_channels. + * Setup chann[] for round robin. + */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + chann[idx] = idx; + + saved_chann = 0; + used_chann = 0; + + /* + * First build a list of IO channels already assigned + * to this phys_id before reassigning the same IO + * channels to the remaining CPUs. 
+ */ + cpup = phba->sli4_hba.cpu_map; + cpu = first_cpu; + cpup += cpu; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; + idx++) { + if (cpup->phys_id == i) { + /* + * Save any IO channels that are + * already mapped to this phys_id. + */ + if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { + chann[saved_chann] = + cpup->channel_id; + saved_chann++; + goto out; + } + + /* See if we are using round-robin */ + if (saved_chann == 0) + saved_chann = + phba->cfg_fcp_io_channel; + + /* Associate next IO channel with CPU */ + cpup->channel_id = chann[used_chann]; + num_io_channel++; + used_chann++; + if (used_chann == saved_chann) + used_chann = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3331 Set IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } +out: + cpu++; + if (cpu >= phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpu = 0; + } else { + cpup++; + } + } + } + + if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { + cpup->channel_id = 0; + num_io_channel++; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3332 Assign IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } + cpup++; + } + } + + /* Sanity check */ + if (num_io_channel != phba->sli4_hba.num_present_cpu) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3333 Set affinity mismatch:" + "%d chann != %d cpus: %d vactors\n", + num_io_channel, phba->sli4_hba.num_present_cpu, + vectors); + + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; + return 1; +} + + +/** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * @@ -8259,9 +8662,7 @@ enable_msix_vectors: phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); - /* - * Assign MSI-X vectors to interrupt handlers - */ + /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); sprintf((char *)&phba->sli4_hba.handler_name[index], @@ -8289,6 +8690,8 @@ enable_msix_vectors: phba->cfg_fcp_io_channel, vectors); phba->cfg_fcp_io_channel = vectors; } + + lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: @@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) lpfc_sli4_disable_intr(phba); lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = { static int __init lpfc_init(void) { + int cpu; 
int error = 0; printk(LPFC_MODULE_DESC "\n"); @@ -10561,6 +10965,11 @@ lpfc_init(void) return -ENOMEM; } } + + /* Initialize in case vector mapping is needed */ + for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index baf53e6c2bd1..2a4e5d21eab2 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -37,6 +37,7 @@ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ +#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index a7a9fa468308..41363db7d426 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) /* Only FC supports upd bit */ if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && - (vport->fc_flag & FC_VFI_REGISTERED)) { + (vport->fc_flag & FC_VFI_REGISTERED) && + (!phba->fc_topology_changed)) { bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " - " vfi:%d, vpi:%d, fc_pname:%x%x\n", + " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" + " port_state:x%x topology chg:%d\n", vport->fc_myDID, phba->fcf.fcfi, phba->sli4_hba.vfi_ids[vport->vfi], phba->vpi_ids[vport->vpi], - reg_vfi->wwn[0], reg_vfi->wwn[1]); + reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, + vport->port_state, phba->fc_topology_changed); } /** diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index cd86069a0ba8..812d0cd7c86d 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Calculate alignment */ + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, - phba->cfg_sg_dma_buf_size, + i, 0); - else + } else { phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, align, 0); + } + if (!phba->lpfc_scsi_dma_buf_pool) goto fail; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 82f4d3542289..31e9b92f5a9b 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, - ndlp->nlp_rpi); + ndlp->nlp_rpi, vport->port_state, + vport->fc_flag); if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; @@ -574,7 +576,7 @@ out: lpfc_els_rsp_reject(vport, 
stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, if ((irsp->ulpStatus) || (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 74b8710e1e90..8523b278ec9d 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -24,6 +24,8 @@ #include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> +#include <linux/crc-t10dif.h> +#include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -48,7 +50,7 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 -int _dump_buf_done; +int _dump_buf_done = 1; static char *dif_op_str[] = { "PROT_NORMAL", @@ -66,6 +68,10 @@ struct scsi_dif_tuple { __be32 ref_tag; /* Target LBA or indirect LBA */ }; +#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK) +#define scsi_prot_flagged(sc, flg) sc +#endif + static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static 
void @@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; - int bcnt; + int bcnt, bpl_size; + + bpl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); @@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, struct list_head *post_sblist, int sb_count) { struct lpfc_scsi_buf *psb, *psb_next; - int status; + int status, sgl_size; int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; dma_addr_t pdma_phys_bpl1; int last_xritag = NO_XRI; @@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, if (sb_count <= 0) return -EINVAL; + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + list_for_each_entry_safe(psb, psb_next, post_sblist, list) { list_del_init(&psb->list); block_cnt++; @@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, post_cnt = block_cnt; } else if (block_cnt == 1) { /* last single sgl with non-contiguous xri */ - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + if (sgl_size > SGL_PAGE_SIZE) pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; else @@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) int num_posted, rc = 0; /* get all SCSI buffers need to repost to a local list */ - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); + list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); /* post the list of scsi buffer sgls to port if available */ if (!list_empty(&post_sblist)) { @@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + dma_addr_t pdma_phys_bpl; uint16_t iotag, lxri = 0; - int bcnt, num_posted; + int bcnt, num_posted, sgl_size; LIST_HEAD(prep_sblist); LIST_HEAD(post_sblist); LIST_HEAD(scsi_sblist); + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp)); + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); if (!psb) @@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) } memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + /* Page alignment is CRITICAL, double check to be sure */ + if (((unsigned long)(psb->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + /* Allocate iotag for psb->cur_iocbq. 
*/ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { @@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_cmnd = (psb->data + sgl_size); psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + sizeof(struct fcp_cmnd)); /* Initialize local short-hand pointers. */ sgl = (struct sli4_sge *)psb->fcp_bpl; pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = - (psb->dma_handle + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); /* @@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpLe = 1; iocb->ulpClass = CLASS3; psb->cur_iocbq.context1 = psb; - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; psb->dma_phys_bpl = pdma_phys_bpl; /* add the scsi buffer to a post list */ list_add_tail(&psb->list, &post_sblist); - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt++; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } lpfc_printf_log(phba, KERN_INFO, LOG_BG, "3021 Allocate %d out of %d requested new SCSI " @@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf * lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; - unsigned long iflag = 0; - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - if (lpfc_cmd) { - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; + struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; + unsigned long gflag = 0; + unsigned long pflag = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, + list); + if (!lpfc_cmd) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + list_remove_head(scsi_buf_list_get, lpfc_cmd, + struct lpfc_scsi_buf, list); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); return lpfc_cmd; } /** @@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf *lpfc_cmd ; - unsigned long iflag = 0; + unsigned long gflag = 0; + unsigned long pflag = 0; int found = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, - list) { + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; list_del(&lpfc_cmd->list); found = 1; - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - 
lpfc_cmd->prot_seg_cnt = 0; break; } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); + if (!found) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, + list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_cmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); if (!found) return NULL; - else - return lpfc_cmd; + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } /** @@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + if (psb->exch_busy) { spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); @@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); } else { - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } } @@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ if (datadir == DMA_FROM_DEVICE) { - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); } bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); @@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_bde >= (phba->cfg_total_seg_cnt - 2)) + return num_bde + 3; + /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); @@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); @@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_bde >= phba->cfg_total_seg_cnt) + return num_bde + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9065 BLKGRD:%s Invalid data segment\n", @@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; int i = 0, num_sge = 0, status; - int datadir = sc->sc_data_direction; uint32_t reftag; unsigned blksize; uint8_t txop, rxop; @@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + } + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - if (datadir == DMA_FROM_DEVICE) { - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); - } + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_sge >= (phba->cfg_total_seg_cnt - 2)) + return num_sge + 3; + /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); @@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) { + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + + } else { + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + /* + * When in this mode, the hardware will replace + * the guard tag from the host with a + * newly generated good CRC for the wire. + * Switch to raw mode here to avoid this + * behavior. What the host sends gets put on the wire. + */ + if (txop == BG_OP_IN_CRC_OUT_CRC) { + txop = BG_OP_RAW_MODE; + rxop = BG_OP_RAW_MODE; + } + } + + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_sge >= phba->cfg_total_seg_cnt) + return num_sge + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9086 BLKGRD:%s Invalid data segment\n", @@ -2670,6 +2801,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) } /** + * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be adjusted. + * + * Adjust the data length to account for how much data + * is actually on the wire. 
+ * + * returns the adjusted data length + **/ +static int +lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *sc = lpfc_cmd->pCmd; + int fcpdl; + + fcpdl = scsi_bufflen(sc); + + /* Check if there is protection data on the wire */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + /* Read */ + if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + return fcpdl; + + } else { + /* Write */ + if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + return fcpdl; + } + + /* + * If we are in DIF Type 1 mode every data block has a 8 byte + * DIF (trailer) attached to it. Must ajust FCP data length. + */ + if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI)) + fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; + + return fcpdl; +} + +/** * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be prep'ed. @@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; - int diflen, fcpdl; - unsigned blksize; + int fcpdl; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9067 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: + + /* Here we need to add a PDE5 and PDE6 to the count */ + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) + goto err; + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ if (num_bde < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9068 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* + * There is a minimun of 4 BPLs used for every + * protection data segment. 
+ */ + if ((lpfc_cmd->prot_seg_cnt * 4) > + (phba->cfg_total_seg_cnt - 2)) + goto err; num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_bde < 3) || + (num_bde > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9022 Unexpected protection group %i\n", prot_group_type); @@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcpdl = scsi_bufflen(scsi_cmnd); - - if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { - /* - * We are in DIF Type 1 mode - * Every data block has a 8 byte DIF (trailer) - * attached to it. Must ajust FCP data length - */ - blksize = lpfc_cmd_blksize(scsi_cmnd); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - } + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -2812,14 +2969,234 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9023 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", + "9023 Cannot setup S/G List for HBA" + "IO segs %d/%d BPL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_bde); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } /* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CRC algorithmn + * using crc_t10dif. + */ +uint16_t +lpfc_bg_crc(uint8_t *data, int count) +{ + uint16_t crc = 0; + uint16_t x; + + crc = crc_t10dif(data, count); + x = cpu_to_be16(crc); + return x; +} + +/* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CSUM algorithmn + * using ip_compute_csum. + */ +uint16_t +lpfc_bg_csum(uint8_t *data, int count) +{ + uint16_t ret; + + ret = ip_compute_csum(data, count); + return ret; +} + +/* + * This function examines the protection data to try to determine + * what type of T10-DIF error occurred. 
+ */ +void +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct scatterlist *sgde; /* s/g data entry */ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct scsi_dif_tuple *src = NULL; + uint8_t *data_src = NULL; + uint16_t guard_tag, guard_type; + uint16_t start_app_tag, app_tag; + uint32_t start_ref_tag, ref_tag; + int prot, protsegcnt; + int err_type, len, data_len; + int chk_ref, chk_app, chk_guard; + uint16_t sum; + unsigned blksize; + + err_type = BGS_GUARD_ERR_MASK; + sum = 0; + guard_tag = 0; + + /* First check to see if there is protection data to examine */ + prot = scsi_get_prot_op(cmd); + if ((prot == SCSI_PROT_READ_STRIP) || + (prot == SCSI_PROT_WRITE_INSERT) || + (prot == SCSI_PROT_NORMAL)) + goto out; + + /* Currently the driver just supports ref_tag and guard_tag checking */ + chk_ref = 1; + chk_app = 0; + chk_guard = 0; + + /* Setup a ptr to the protection data provided by the SCSI host */ + sgpe = scsi_prot_sglist(cmd); + protsegcnt = lpfc_cmd->prot_seg_cnt; + + if (sgpe && protsegcnt) { + + /* + * We will only try to verify guard tag if the segment + * data length is a multiple of the blksize. + */ + sgde = scsi_sglist(cmd); + blksize = lpfc_cmd_blksize(cmd); + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + guard_type = scsi_host_get_guard(cmd->device->host); + + start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */ + start_app_tag = src->app_tag; + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + while (src && protsegcnt) { + while (len) { + + /* + * First check to see if a protection data + * check is valid + */ + if ((src->ref_tag == 0xffffffff) || + (src->app_tag == 0xffff)) { + start_ref_tag++; + goto skipit; + } + + /* App Tag checking */ + app_tag = src->app_tag; + if (chk_app && (app_tag != start_app_tag)) { + err_type = BGS_APPTAG_ERR_MASK; + goto out; + } + + /* Reference Tag checking */ + ref_tag = be32_to_cpu(src->ref_tag); + if (chk_ref && (ref_tag != start_ref_tag)) { + err_type = BGS_REFTAG_ERR_MASK; + goto out; + } + start_ref_tag++; + + /* Guard Tag checking */ + if (chk_guard) { + guard_tag = src->guard_tag; + if (guard_type == SHOST_DIX_GUARD_IP) + sum = lpfc_bg_csum(data_src, + blksize); + else + sum = lpfc_bg_crc(data_src, + blksize); + if ((guard_tag != sum)) { + err_type = BGS_GUARD_ERR_MASK; + goto out; + } + } +skipit: + len -= sizeof(struct scsi_dif_tuple); + if (len < 0) + len = 0; + src++; + + data_src += blksize; + data_len -= blksize; + + /* + * Are we at the end of the Data segment? + * The data segment is only used for Guard + * tag checking. 
+ */ + if (chk_guard && (data_len == 0)) { + chk_guard = 0; + sgde = sg_next(sgde); + if (!sgde) + goto out; + + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + } + } + + /* Goto the next Protection data segment */ + sgpe = sg_next(sgpe); + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + } else { + src = NULL; + } + protsegcnt--; + } + } +out: + if (err_type == BGS_GUARD_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x1); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + sum, guard_tag); + + } else if (err_type == BGS_REFTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x3); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + ref_tag, start_ref_tag); + + } else if (err_type == BGS_APPTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x2); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9041 BLKGRD: LBA %lx app_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + app_tag, start_app_tag); + } +} + + +/* * This function checks for BlockGuard errors detected by * the HBA. In case of errors, the ASC/ASCQ fields in the * sense buffer will be set accordingly, paired with @@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" - " 0x%x lba 0x%llx blk cnt 0x%x " - "bgstat=0x%x bghm=0x%x\n", - cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); - spin_lock(&_dump_buf_lock); if (!_dump_buf_done) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" @@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" - " BlockGuard profile. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9072 BLKGRD: Invalid BG Profile in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " - "Invalid BlockGuard DIF Block. 
bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9073 BLKGRD: Invalid BG PDIF Block in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } @@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->result = DRIVER_SENSE << 24 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_guard_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9055 BLKGRD: guard_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9055 BLKGRD: Guard Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_reftag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9056 BLKGRD: ref_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9056 BLKGRD: Ref Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_apptag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9061 BLKGRD: app_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9061 BLKGRD: App Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (!ret) { /* No error was reported - problem in FW? */ - cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9057 BLKGRD: Unknown error reported!\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9057 BLKGRD: Unknown error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); + + /* Calcuate what type of error it was */ + lpfc_calc_bg_err(phba, lpfc_cmd); } - out: return ret; } @@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -3094,45 +3489,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } /** - * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard - * @phba: The Hba for which this call is being executed. - * @lpfc_cmd: The scsi buffer which is going to be adjusted. - * - * Adjust the data length to account for how much data - * is actually on the wire. 
- * - * returns the adjusted data length - **/ -static int -lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) -{ - struct scsi_cmnd *sc = lpfc_cmd->pCmd; - int diflen, fcpdl; - unsigned blksize; - - fcpdl = scsi_bufflen(sc); - - /* Check if there is protection data on the wire */ - if (sc->sc_data_direction == DMA_FROM_DEVICE) { - /* Read */ - if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) - return fcpdl; - - } else { - /* Write */ - if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) - return fcpdl; - } - - /* If protection data on the wire, adjust the count accordingly */ - blksize = lpfc_cmd_blksize(sc); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - return fcpdl; -} - -/** * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. @@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; - uint32_t num_bde = 0; + uint32_t num_sge = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd - * fcp_rsp regions to the first data bde entry + * fcp_rsp regions to the first data sge entry */ if (scsi_sg_count(scsi_cmnd)) { /* @@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, sgl += 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9087 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: - num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, + /* Here we need to add a DISEED to the count */ + if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) + goto err; + + num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, datasegcnt); + /* we should have 2 or more entries in buffer list */ - if (num_bde < 2) + if (num_sge < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9088 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + /* + * There is a minimun of 3 SGEs used for every + * protection data segment. 
+ */ + if ((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) + goto err; - num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, + num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, datasegcnt, protsegcnt); + /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_sge < 3) || + (num_sge > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9083 Unexpected protection group %i\n", prot_group_type); @@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9084 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", - prot_group_type, num_bde); + "9084 Cannot setup S/G List for HBA" + "IO segs %d/%d SGL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_sge); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } @@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9033 BLKGRD: rcvd %s cmd:x%x " "sector x%llx cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], @@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x sector x%llx cnt %u pt %x\n", cmnd->cmnd[0], @@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), - (2*vport->cfg_devloss_tmo*HZ)); + msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); lpfc_cmd->waitq = NULL; if (lpfc_cmd->pCmd == cmnd) { @@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; int rc, ret = SUCCESS; + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3172 SCSI layer issued Host Reset Data:\n"); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) ret = FAILED; - lpfc_online(phba); + rc = lpfc_online(phba); + if (rc) + ret = FAILED; lpfc_unblock_mgmt_io(phba); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3172 SCSI layer issued Host Reset Data: x%x\n", ret); + if (ret == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3323 Failed host reset, bring it offline\n"); + lpfc_sli4_offline_eratt(phba); + } return ret; } @@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev) } num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0708 Allocation request of %d " - "command buffers did not succeed. 
" - "Allocated %d buffers.\n", - num_to_alloc, num_allocated); + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } if (num_allocated > 0) phba->total_scsi_bufs += num_allocated; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 35dd17eb0f27..572579f87de4 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov + 1); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) @@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov * 2); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); list_splice_init(&phba->active_rrq_list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, else rrq->send_rrq = 0; rrq->xritag = xritag; - rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); + rrq->rrq_stop_time = jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); rrq->ndlp = ndlp; rrq->nlp_DID = ndlp->nlp_DID; rrq->vport = ndlp->vport; @@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) ndlp = piocbq->context_un.ndlp; - else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && - (piocbq->iocb_flag & LPFC_IO_LIBDFC)) + else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) ndlp = piocbq->context_un.ndlp; else ndlp = piocbq->context1; @@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, BUG(); else mod_timer(&piocb->vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov << 1)); + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); } @@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x\n", pmb->vport ? 
pmb->vport->vpi : 0, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmbox->un.varWords[4], pmbox->un.varWords[5], pmbox->un.varWords[6], - pmbox->un.varWords[7]); + pmbox->un.varWords[7], + pmbox->un.varWords[8], + pmbox->un.varWords[9], + pmbox->un.varWords[10]); if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); @@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr) lpfc_worker_wake_up(phba); else /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, jiffies + - HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); return; } @@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } + phba->sli4_hba.max_cfg_param.vpi_used = 0; break; case LPFC_RSC_TYPE_FCOE_XRI: kfree(phba->sli4_hba.xri_bmask); @@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); } else { kfree(phba->vpi_bmask); + phba->sli4_hba.max_cfg_param.vpi_used = 0; kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.xri_bmask); @@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; - int status, post_cnt = 0, num_posted = 0, block_cnt = 0; + int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); @@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); spin_unlock_irq(&phba->hbalock); + total_cnt = phba->sli4_hba.els_xri_cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); @@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt--; - spin_unlock_irq(&phba->hbalock); + total_cnt--; } } } @@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) (sglq_entry_first->sli4_xritag + post_cnt - 1)); list_splice_init(&blck_sgl_list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt -= post_cnt; - spin_unlock_irq(&phba->hbalock); + total_cnt -= post_cnt; } /* don't reset xirtag due to hole in xri block */ @@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* reset els sgl post count for next round of posting */ post_cnt = 0; } + /* update the number of XRIs posted for ELS */ + phba->sli4_hba.els_xri_cnt = total_cnt; /* free the els sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); @@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov * 2)); + jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Start error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * 
LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000); + mod_timer(&psli->mbox_tmo, jiffies + timeout); } /* Mailbox cmd <cmd> issue */ @@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " @@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, static inline uint32_t lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { - int i; - - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) - i = smp_processor_id(); - else - i = atomic_add_return(1, &phba->fcp_qidx); + struct lpfc_vector_map_info *cpup; + int chann, cpu; - i = (i % phba->cfg_fcp_io_channel); - return i; + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) { + cpu = smp_processor_id(); + if (cpu < phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpup += cpu; + return cpup->channel_id; + } + chann = cpu; + } + chann = atomic_add_return(1, &phba->fcp_qidx); + chann = (chann % phba->cfg_fcp_io_channel); + return chann; } /** @@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if ((piocb->iocb_flag & LPFC_IO_FCP) || (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], &wqe)) return IOCB_ERROR; } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) return IOCB_ERROR; } @@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = timeout * HZ; + timeout_req = msecs_to_jiffies(timeout * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); @@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_event_interruptible_timeout(done_q, pmboxq->mbox_flag & LPFC_MBX_WAKE, - timeout * HZ); + msecs_to_jiffies(timeout * 1000)); spin_lock_irqsave(&phba->hbalock, flag); pmboxq->context1 = NULL; @@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, } wq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3264 WQ[%d]: barset:x%x, offset:x%x\n", - wq->queue_id, pci_barset, db_offset); + "3264 WQ[%d]: barset:x%x, offset:x%x, " + "format:x%x\n", wq->queue_id, pci_barset, + db_offset, wq->db_format); } else { wq->db_format = LPFC_DB_LIST_FORMAT; wq->db_regaddr = phba->sli4_hba.WQDBregaddr; @@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } hrq->db_regaddr = bar_memmap_p + 
db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", - hrq->queue_id, pci_barset, db_offset); + "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " + "format:x%x\n", hrq->queue_id, pci_barset, + db_offset, hrq->db_format); } else { hrq->db_format = LPFC_DB_RING_FORMAT; hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; @@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s " - "Frame Data:%08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type], + "2538 Received frame rctl:%s (x%x), type:%s (x%x), " + "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), - be32_to_cpu(header[4]), be32_to_cpu(header[5])); + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index be02b59ea279..67af460184ba 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -346,11 +346,6 @@ struct lpfc_bmbx { #define SLI4_CT_VFI 2 #define SLI4_CT_FCFI 3 -#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000 -#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000 -#define LPFC_SLI4_MIN_BUF_SIZE 0x400 -#define LPFC_SLI4_MAX_BUF_SIZE 0x20000 - /* * SLI4 specific data structures */ @@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info { #define LPFC_SLI4_HANDLER_NAME_SZ 16 +/* Used for IRQ vector to CPU mapping */ +struct lpfc_vector_map_info { + uint16_t phys_id; + uint16_t core_id; + uint16_t irq; + uint16_t channel_id; + struct cpumask maskbits; +}; +#define LPFC_VECTOR_MAP_EMPTY 0xffff +#define LPFC_MAX_CPU 256 + /* SLI4 HBA data structure entries */ struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for @@ -573,6 +579,11 @@ struct lpfc_sli4_hba { struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + + /* CPU to vector mapping information */ + struct lpfc_vector_map_info *cpu_map; + uint16_t num_online_cpu; + uint16_t num_present_cpu; }; enum lpfc_sge_type { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 664cd04f7cd8..a38dc3b16969 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.38" +#define LPFC_DRIVER_VERSION "8.3.39" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 0fe188e66000..e28e431564b0 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport, } } -static int +int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; @@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; long timeout; + bool ns_ndlp_referenced = false; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, @@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_debugfs_terminate(vport); + /* + * The call to fc_remove_host might release the NameServer ndlp. Since + * we might need to use the ndlp to send the DA_ID CT command, + * increment the reference for the NameServer ndlp to prevent it from + * being released. + */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_nlp_get(ndlp); + ns_ndlp_referenced = true; + } + /* Remove FC host and then SCSI host with the vport */ fc_remove_host(lpfc_shost_from_vport(vport)); scsi_remove_host(lpfc_shost_from_vport(vport)); @@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_discovery_wait(vport); skip_logo: + + /* + * If the NameServer ndlp has been incremented to allow the DA_ID CT + * command to be sent, decrement the ndlp now. + */ + if (ns_ndlp_referenced) { + ndlp = lpfc_findnode_did(vport, NameServer_DID); + lpfc_nlp_put(ndlp); + } + lpfc_cleanup(vport); lpfc_sli_host_down(vport); diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 90828340acea..6b2c94eb8134 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); +int lpfc_alloc_vpi(struct lpfc_hba *phba); /* * queuecommand VPORT-specific return codes. Specified in the host byte code. 
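
Note on the lpfc_scsi.c buffer-list rework in the hunks above: the driver replaces the single lpfc_scsi_buf_list with a "get" list and a "put" list under separate locks; lpfc_get_scsi_buf pops from the get list and only splices the put list across when the get list runs dry, while lpfc_release_scsi_buf always appends to the put list. A minimal sketch of that two-list pool pattern follows; the names (buf_pool, pool_get, pool_put) are hypothetical and this is not lpfc code, just an illustration of the locking scheme the hunks adopt.

#include <linux/list.h>
#include <linux/spinlock.h>

struct buf {
	struct list_head list;
};

struct buf_pool {
	spinlock_t get_lock, put_lock;
	struct list_head get_list, put_list;
};

static void pool_init(struct buf_pool *p)
{
	spin_lock_init(&p->get_lock);
	spin_lock_init(&p->put_lock);
	INIT_LIST_HEAD(&p->get_list);
	INIT_LIST_HEAD(&p->put_list);
}

static struct buf *pool_get(struct buf_pool *p)
{
	struct buf *b = NULL;
	unsigned long gflag, pflag;

	spin_lock_irqsave(&p->get_lock, gflag);
	if (list_empty(&p->get_list)) {
		/* Get list is dry: splice over everything freed so far. */
		spin_lock_irqsave(&p->put_lock, pflag);
		list_splice_init(&p->put_list, &p->get_list);
		spin_unlock_irqrestore(&p->put_lock, pflag);
	}
	if (!list_empty(&p->get_list)) {
		b = list_first_entry(&p->get_list, struct buf, list);
		list_del(&b->list);
	}
	spin_unlock_irqrestore(&p->get_lock, gflag);
	return b;
}

static void pool_put(struct buf_pool *p, struct buf *b)
{
	unsigned long pflag;

	/* Frees never touch the get lock; they only append to the put list. */
	spin_lock_irqsave(&p->put_lock, pflag);
	list_add_tail(&b->list, &p->put_list);
	spin_unlock_irqrestore(&p->put_lock, pflag);
}

With this split, the allocation path and the completion path contend with each other only during the occasional splice, instead of on every get/put of a SCSI buffer.
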
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7c90d57b867e..3a9ddae86f1f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) printk(KERN_ERR "megaraid_sas: timed out while" "waiting for HBA to recover\n"); error = -ENODEV; - goto out_kfree_ioc; + goto out_up; } spin_unlock_irqrestore(&instance->hba_lock, flags); error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); + out_up: up(&instance->ioctl_sem); out_kfree_ioc: diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 74550922ad55..7b7381d7671f 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) } for (i = 0; i < MVS_MAX_DEVICES; i++) { mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->devices[i].dev_type = NO_DEVICE; + mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; mvi->devices[i].dev_status = MVS_DEV_NORMAL; init_timer(&mvi->devices[i].timer); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 532110f4562a..c9e244984e30 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, return 0; } -#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED))) static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, struct mvs_tmf_task *tmf, int *pass) { @@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf * libsas will use dev->port, should * not call task_done for sata */ - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) task->task_done(task); return rc; } @@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) phy->identify.device_type = phy->att_dev_info & PORT_DEV_TYPE_MASK; - if (phy->identify.device_type == SAS_END_DEV) + if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) + else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (oob_done) @@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) { u32 dev; for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { - if (mvi->devices[dev].dev_type == NO_DEVICE) { + if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { mvi->devices[dev].device_id = dev; return &mvi->devices[dev]; } @@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev) u32 id = mvi_dev->device_id; memset(mvi_dev, 0, sizeof(*mvi_dev)); mvi_dev->device_id = id; - mvi_dev->dev_type = NO_DEVICE; + mvi_dev->dev_type = SAS_PHY_UNUSED; mvi_dev->dev_status = MVS_DEV_NORMAL; mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; } @@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) { int rc; struct sas_phy *phy = sas_get_local_phy(dev); - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 
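Editor's note: the megaraid_sas hunk above fixes an error path that bailed out with the ioctl semaphore still held; jumping to out_up instead of out_kfree_ioc makes the failure path run up(&instance->ioctl_sem) before freeing the ioc buffer. A compact illustration of that ordered goto-cleanup idiom follows; the lock and buffer names are made up for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static int sem_count = 1;
static void sem_down(void) { sem_count--; }
static void sem_up(void)   { sem_count++; }

static int do_ioctl(bool adapter_dead)
{
	int error = 0;
	char *ioc = malloc(64);		/* acquired first, released last */

	if (!ioc)
		return -1;

	sem_down();			/* acquired second, released first */

	if (adapter_dead) {
		error = -19;		/* -ENODEV */
		goto out_up;		/* must still release the semaphore */
	}

	printf("firmware ioctl issued\n");

out_up:
	sem_up();
/* out_kfree_ioc: */
	free(ioc);
	return error;
}

int main(void)
{
	do_ioctl(true);
	printf("semaphore count after error path: %d (expect 1)\n", sem_count);
	return 0;
}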
0 : 1; rc = sas_phy_reset(phy, reset_type); sas_put_local_phy(phy); @@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { - if (SATA_DEV == dev->dev_type) { + if (SAS_SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = task->lldd_task; u32 slot_idx = (u32)(slot - mvi->slot_info); mv_dprintk("mvs_abort_task() mvi=%p task=%p " diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 9f3cc13a5ce7..60e2fb7f2dca 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch; extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ - ((type == EDGE_DEV) || (type == FANOUT_DEV)) + ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define bit(n) ((u64)1 << n) @@ -241,7 +241,7 @@ struct mvs_phy { struct mvs_device { struct list_head dev_entry; - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct mvs_info *mvi_info; struct domain_device *sas_device; struct timer_list timer; diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile index 52f04296171c..ce4cd87c7c66 100644 --- a/drivers/scsi/pm8001/Makefile +++ b/drivers/scsi/pm8001/Makefile @@ -4,9 +4,10 @@ # Copyright (C) 2008-2009 USI Co., Ltd. -obj-$(CONFIG_SCSI_PM8001) += pm8001.o -pm8001-y += pm8001_init.o \ +obj-$(CONFIG_SCSI_PM8001) += pm80xx.o +pm80xx-y += pm8001_init.o \ pm8001_sas.o \ pm8001_ctl.o \ - pm8001_hwi.o + pm8001_hwi.o \ + pm80xx_hwi.o diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 45bc197bc22f..d99f41c2ca13 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. 
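Editor's note: the mvsas hunks are a mechanical rename from the driver-local device-type names (NO_DEVICE, SATA_DEV, EDGE_DEV, FANOUT_DEV) to the shared transport-layer names (SAS_PHY_UNUSED, SAS_SATA_DEV, SAS_EDGE_EXPANDER_DEVICE, SAS_FANOUT_EXPANDER_DEVICE) together with the enum sas_device_type type. A minimal sketch of the expander-test macro over such an enum; the enumerator values here are illustrative, the real ones live in the kernel's shared SAS headers.

#include <stdio.h>

/* Illustrative subset of the shared SAS device-type enum. */
enum sas_device_type {
	SAS_PHY_UNUSED,
	SAS_END_DEVICE,
	SAS_EDGE_EXPANDER_DEVICE,
	SAS_FANOUT_EXPANDER_DEVICE,
	SAS_SATA_DEV,
};

#define DEV_IS_EXPANDER(type) \
	((type) == SAS_EDGE_EXPANDER_DEVICE || \
	 (type) == SAS_FANOUT_EXPANDER_DEVICE)

int main(void)
{
	enum sas_device_type t = SAS_EDGE_EXPANDER_DEVICE;

	printf("expander? %d\n", DEV_IS_EXPANDER(t));
	printf("gone?     %d\n", t == SAS_PHY_UNUSED);
	return 0;
}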
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.interface_rev); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); + } } static DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); @@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); + } else { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + } } static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); /** @@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.max_out_io); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); + } } static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); /** @@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16) + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16) + ); + } } static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); /** @@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 
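Editor's note: each pm8001 sysfs show routine now picks the SPC (chip_8001) or SPCv/SPCve layout of the main configuration table before formatting the value. The repeated if/else can be captured by a small accessor; the sketch below models that with a tagged union, with the helper name and layout being illustrative rather than the driver's.

#include <stdint.h>
#include <stdio.h>

enum chip_flavor { CHIP_8001, CHIP_80XX };

struct cfg_spc  { uint32_t interface_rev; uint32_t firmware_rev; };
struct cfg_spcv { uint32_t interface_rev; uint32_t firmware_rev; };

struct hba {
	enum chip_flavor chip_id;
	union {
		struct cfg_spc  spc;
		struct cfg_spcv spcv;
	} main_cfg_tbl;
};

/* One place that knows which table layout applies to this chip. */
static uint32_t hba_interface_rev(const struct hba *ha)
{
	return ha->chip_id == CHIP_8001 ?
		ha->main_cfg_tbl.spc.interface_rev :
		ha->main_cfg_tbl.spcv.interface_rev;
}

int main(void)
{
	struct hba ha = { .chip_id = CHIP_80XX };

	ha.main_cfg_tbl.spcv.interface_rev = 3;
	printf("interface_rev: %u\n", hba_interface_rev(&ha));
	return 0;
}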
0x0000FFFF + ); + } } static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); @@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; + /* fe000000 means supports SAS2.1 */ + if (pm8001_ha->chip_id == chip_8001) + mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + else + /* fe000000 means supports SAS2.1 */ + mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag & + 0xfe000000)>>25; return show_sas_spec_support_status(mode, buf); } static DEVICE_ATTR(sas_spec_support, S_IRUGO, @@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) goto out; } payload = (struct pm8001_ioctl_payload *)ioctlbuffer; - memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, pm8001_ha->fw_image->size); payload->length = pm8001_ha->fw_image->size; payload->id = 0; + payload->minor_function = 0x1; pm8001_ha->nvmd_completion = &completion; ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); wait_for_completion(&completion); @@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) payload->length = 1024*16; payload->id = 0; fwControl = - (struct fw_control_info *)payload->func_specific; + (struct fw_control_info *)&payload->func_specific; fwControl->len = IOCTL_BUF_SIZE; /* IN */ fwControl->size = partitionSize + HEADER_LEN;/* IN */ fwControl->retcode = 0;/* OUT */ diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index c3d20c8d4abe..479c5a7a863a 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -43,9 +43,12 @@ enum chip_flavors { chip_8001, + chip_8008, + chip_8009, + chip_8018, + chip_8019 }; -#define USI_MAX_MEMCNT 9 -#define PM8001_MAX_DMA_SG SG_ALL + enum phy_speed { PHY_SPEED_15 = 0x01, PHY_SPEED_30 = 0x02, @@ -69,23 +72,34 @@ enum port_type { #define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ #define PM8001_MAX_INB_NUM 1 #define PM8001_MAX_OUTB_NUM 1 +#define PM8001_MAX_SPCV_INB_NUM 1 +#define PM8001_MAX_SPCV_OUTB_NUM 4 #define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ +/* Inbound/Outbound queue size */ +#define IOMB_SIZE_SPC 64 +#define IOMB_SIZE_SPCV 128 + /* unchangeable hardware details */ -#define PM8001_MAX_PHYS 8 /* max. possible phys */ -#define PM8001_MAX_PORTS 8 /* max. possible ports */ -#define PM8001_MAX_DEVICES 1024 /* max supported device */ +#define PM8001_MAX_PHYS 16 /* max. possible phys */ +#define PM8001_MAX_PORTS 16 /* max. 
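Editor's note: the new region layout in pm8001_defs.h derives the inbound/outbound queue and index regions from USI_MAX_MEMCNT_BASE and the SPCv queue counts, so USI_MAX_MEMCNT grows automatically with the queue configuration. With one inbound and four outbound queues the indices expand as shown below; the assertions just spell out the arithmetic already encoded by the macros in the patch.

#include <assert.h>
#include <stdio.h>

#define PM8001_MAX_SPCV_INB_NUM  1
#define PM8001_MAX_SPCV_OUTB_NUM 4

#define USI_MAX_MEMCNT_BASE 5
#define IB  (USI_MAX_MEMCNT_BASE + 1)
#define CI  (IB + PM8001_MAX_SPCV_INB_NUM)
#define OB  (CI + PM8001_MAX_SPCV_INB_NUM)
#define PI  (OB + PM8001_MAX_SPCV_OUTB_NUM)
#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)

int main(void)
{
	/* Base regions 0..5, then IB=6, CI=7, OB=8..11, PI=12..15. */
	assert(IB == 6 && CI == 7 && OB == 8 && PI == 12);
	assert(USI_MAX_MEMCNT == 16);
	printf("IB=%d CI=%d OB=%d PI=%d total=%d\n",
	       IB, CI, OB, PI, USI_MAX_MEMCNT);
	return 0;
}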
possible ports */ +#define PM8001_MAX_DEVICES 2048 /* max supported device */ +#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define USI_MAX_MEMCNT_BASE 5 +#define IB (USI_MAX_MEMCNT_BASE + 1) +#define CI (IB + PM8001_MAX_SPCV_INB_NUM) +#define OB (CI + PM8001_MAX_SPCV_INB_NUM) +#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM) +#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM) +#define PM8001_MAX_DMA_SG SG_ALL enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ IOP, /* IO processor */ - CI, /* consumer index */ - PI, /* producer index */ - IB, /* inbound queue */ - OB, /* outbound queue */ NVMD, /* NVM device */ DEV_MEM, /* memory for devices */ CCB_MEM, /* memory for command control block */ + FW_FLASH /* memory for fw flash update */ }; #define PM8001_EVENT_LOG_SIZE (128 * 1024) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index b8dd05074abb..69dd49c05f1e 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -50,32 +50,39 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; - pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); - pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); - pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); - pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); - pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); - pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); - pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); - pm8001_ha->main_cfg_tbl.inbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.signature = + pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev = + pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev = + pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io = + pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl = + pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag = + pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset = + pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset = pm8001_mr32(address, MAIN_IBQ_OFFSET); - pm8001_ha->main_cfg_tbl.outbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset = pm8001_mr32(address, MAIN_OBQ_OFFSET); - pm8001_ha->main_cfg_tbl.hda_mode_flag = + pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag = pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); /* read analog Setting offset from the configuration table */ - pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset = pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); /* read Error Dump Offset and Length */ - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 = 
pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); } @@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->general_stat_tbl_addr; - pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); - pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); - pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); - pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); - pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); - pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); - pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); - pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); - pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); - pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); - pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); - pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); - pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); - pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); - pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); - pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); - pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); - pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); - pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); - pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); - pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); - pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); - pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); - pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); - pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); + pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate = + pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 = + pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 = + pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt = + pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt = + pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd = + pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] = + pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] = + pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] = + pm8001_mr32(address, 0x20); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] = + pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] = + pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] = + pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] = + pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] = + pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val = + pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] = + pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] = + pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] = + pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] = + pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] = + pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] = + pm8001_mr32(address, 0x50); + 
pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] = + pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] = + pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] = + pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] = + pm8001_mr32(address, 0x60); } /** @@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) */ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int inbQ_num = 1; int i; void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; - for (i = 0; i < inbQ_num; i++) { + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int outbQ_num = 1; int i; void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; - for (i = 0; i < outbQ_num; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { u32 offset = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) { - int qn = 1; int i; u32 offsetib, offsetob; void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; - - pm8001_ha->main_cfg_tbl.upper_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; - pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = + 
PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_lo; - pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < qn; i++) { + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_hi; + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_lo; + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; pm8001_ha->inbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[IB].total_len; + pm8001_ha->memoryMap.region[IB + i].total_len; pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_hi; + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_lo; + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].ci_virt = - pm8001_ha->memoryMap.region[CI].virt_ptr; + pm8001_ha->memoryMap.region[CI + i].virt_ptr; offsetib = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(addressib, @@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < qn; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_hi; + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_lo; + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; pm8001_ha->outbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[OB].total_len; + pm8001_ha->memoryMap.region[OB + i].total_len; pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_hi; + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_lo; + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = - 0 | (10 << 16) | (0 << 24); + 0 | (10 << 16) | (i << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = - pm8001_ha->memoryMap.region[PI].virt_ptr; + pm8001_ha->memoryMap.region[PI + i].virt_ptr; offsetob = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = 
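Editor's note: with the region indices contiguous, each inbound/outbound queue now uses the region at IB + i, CI + i, OB + i and PI + i rather than one shared slot, and the outbound interrupt field carries the queue number in bits 24..31 (i << 24) so each queue can raise its own vector. A sketch of that per-queue region wiring follows; the structures are simplified and the layout constants are illustrative.

#include <stdint.h>
#include <stdio.h>

#define NQUEUES 4
#define IB 6			/* illustrative layout, one region per queue */
#define CI (IB + NQUEUES)
#define OB (CI + NQUEUES)
#define PI (OB + NQUEUES)

struct region { uint64_t phys; };

struct outq {
	uint64_t base_phys;
	uint64_t pi_phys;
	uint32_t interrupt_vec_cnt_delay;
};

static void init_outq(struct outq *q, const struct region *regions, int i)
{
	q->base_phys = regions[OB + i].phys;	/* queue i gets its own region */
	q->pi_phys   = regions[PI + i].phys;
	/* delay in bits 16..23, interrupt vector number in bits 24..31 */
	q->interrupt_vec_cnt_delay = (10u << 16) | ((uint32_t)i << 24);
}

int main(void)
{
	struct region regions[PI + NQUEUES];
	struct outq q[NQUEUES];

	for (int r = 0; r < PI + NQUEUES; r++)
		regions[r].phys = 0x1000u * (uint64_t)r;
	for (int i = 0; i < NQUEUES; i++) {
		init_outq(&q[i], regions, i);
		printf("outq%d: base=%#llx vec=%u\n", i,
		       (unsigned long long)q[i].base_phys,
		       q[i].interrupt_vec_cnt_delay >> 24);
	}
	return 0;
}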
get_pci_bar_index(pm8001_mr32(addressob, @@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; pm8001_mw32(address, 0x24, - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd); pm8001_mw32(address, 0x28, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3); pm8001_mw32(address, 0x2C, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7); pm8001_mw32(address, 0x30, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3); pm8001_mw32(address, 0x34, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7); pm8001_mw32(address, 0x38, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid0_3); pm8001_mw32(address, 0x3C, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid4_7); pm8001_mw32(address, 0x40, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid0_3); pm8001_mw32(address, 0x44, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid4_7); pm8001_mw32(address, 0x48, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid0_3); pm8001_mw32(address, 0x4C, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. 
+ outbound_tgt_smp_event_pid4_7); pm8001_mw32(address, 0x50, - pm8001_ha->main_cfg_tbl.upper_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr); pm8001_mw32(address, 0x54, - pm8001_ha->main_cfg_tbl.lower_event_log_addr); - pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); - pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size); + pm8001_mw32(address, 0x5C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option); pm8001_mw32(address, 0x60, - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr); pm8001_mw32(address, 0x64, - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); - pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size); pm8001_mw32(address, 0x6C, - pm8001_ha->main_cfg_tbl.iop_event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option); pm8001_mw32(address, 0x70, - pm8001_ha->main_cfg_tbl.fatal_err_interrupt); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt); } /** @@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) */ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) { + u8 i = 0; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + /* 8081 controllers need BAR shift to access MPI space + * as this is shared with BIOS data */ + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } /* check the firmware status */ if (-1 == check_fw_ready(pm8001_ha)) { PM8001_FAIL_DBG(pm8001_ha, @@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - update_inbnd_queue_table(pm8001_ha, 0); - update_outbnd_queue_table(pm8001_ha, 0); - mpi_set_phys_g3_with_ssc(pm8001_ha, 0); - /* 7->130ms, 34->500ms, 119->1.5s */ - mpi_set_open_retry_interval_reg(pm8001_ha, 119); + for (i = 0; i < PM8001_MAX_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + /* 8081 controller donot require these operations */ + if (deviceid != 0x8081) { + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + /* 7->130ms, 34->500ms, 119->1.5s */ + mpi_set_open_retry_interval_reg(pm8001_ha, 119); + } /* notify firmware update finished and check initialization status */ if (0 == mpi_init_check(pm8001_ha)) { PM8001_INIT_DBG(pm8001_ha, @@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) u32 max_wait_count; u32 value; u32 gst_len_mpistate; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } init_pci_device_addresses(pm8001_ha); /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the table is stop */ @@ -740,14 +810,14 @@ static u32 
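Editor's note: pm8001_chip_init() (and mpi_uninit_check()) now read the PCI device ID up front and, for the 0x8081 part, shift BAR4 to GSM_SM_BASE before touching the MPI tables, because that window is shared with BIOS data on those boards; the SSC and open-retry tweaks are likewise skipped there. A sketch of gating setup steps on the probed device ID; the shift helper and the GSM_SM_BASE value below are stand-ins, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define DEVID_8081   0x8081
#define GSM_SM_BASE  0x4F0000u	/* illustrative window base */

/* Stand-in for pm8001_bar4_shift(); returns -1 on failure. */
static int bar4_shift(uint32_t base)
{
	printf("shifting BAR4 window to %#x\n", base);
	return 0;
}

static int chip_init(uint16_t deviceid)
{
	if (deviceid == DEVID_8081) {
		/* MPI space shares BAR4 with BIOS data on 8081 boards. */
		if (bar4_shift(GSM_SM_BASE) < 0)
			return -1;
	}

	/* ... read config/queue tables, update them per queue ... */

	if (deviceid != DEVID_8081) {
		/* SSC and open-retry tuning only applies to the other parts. */
		printf("applying SSC / open retry interval settings\n");
	}
	return 0;
}

int main(void)
{
	chip_init(DEVID_8081);
	chip_init(0x8001);
	return 0;
}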
soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all * the FW register status to the originated status. * @pm8001_ha: our hba card information - * @signature: signature in host scratch pad0 register. */ static int -pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) { u32 regVal, toggleVal; u32 max_wait_count; u32 regVal1, regVal2, regVal3; + u32 signature = 0x252acbcd; /* for host scratch pad0 */ unsigned long flags; /* step1: Check FW is ready for soft reset */ @@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_iounmap - which maped when initialized. * @pm8001_ha: our hba card information */ -static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) { s8 bar, logical = 0; for (bar = 0; bar < 6; bar++) { @@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); @@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); @@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) } /** - * mpi_msg_free_get- get the free message buffer for transfer inbound queue. + * pm8001_mpi_msg_free_get - get the free message buffer for transfer + * inbound queue. * @circularQ: the inbound queue we want to transfer to HBA. * @messageSize: the message size of this transfer, normally it is 64 bytes * @messagePtr: the pointer to message. */ -static int mpi_msg_free_get(struct inbound_queue_table *circularQ, +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr) { u32 offset, consumer_index; @@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, u8 bcCount = 1; /* only support single buffer */ /* Checks is the requested message size can be allocated in this queue*/ - if (messageSize > 64) { + if (messageSize > IOMB_SIZE_SPCV) { *messagePtr = NULL; return -1; } @@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, return -1; } /* get memory IOMB buffer address */ - offset = circularQ->producer_idx * 64; + offset = circularQ->producer_idx * messageSize; /* increment to next bcCount element */ circularQ->producer_idx = (circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE; @@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, } /** - * mpi_build_cmd- build the message queue for transfer, update the PI to FW - * to tell the fw to get this message from IOMB. + * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to + * FW to tell the fw to get this message from IOMB. * @pm8001_ha: our hba card information * @circularQ: the inbound queue we want to transfer to HBA. * @opCode: the operation code represents commands which LLDD and fw recognized. 
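Editor's note: pm8001_mpi_msg_free_get() now sizes the message slot by the controller's IOMB size (64 bytes on SPC, 128 on SPCv) instead of a hard-coded 64, and advances the producer index modulo PM8001_MPI_QUEUE. Below is a standalone model of that circular-queue slot allocation; the full/empty check is the textbook one and differs slightly from the driver's, so treat it as a sketch under those assumptions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MPI_QUEUE      1024	/* entries per inbound queue */
#define IOMB_SIZE_SPC  64
#define IOMB_SIZE_SPCV 128

struct inbound_q {
	uint8_t *base;		/* base of the ring buffer */
	uint32_t producer_idx;
	uint32_t consumer_idx;
	uint32_t iomb_size;	/* per-chip slot size */
};

/* Returns a pointer to the next free slot, or NULL if the ring is full. */
static void *msg_free_get(struct inbound_q *q)
{
	uint32_t next = (q->producer_idx + 1) % MPI_QUEUE;
	void *slot;

	if (next == q->consumer_idx)	/* ring full */
		return NULL;

	slot = q->base + (size_t)q->producer_idx * q->iomb_size;
	q->producer_idx = next;
	return slot;
}

int main(void)
{
	static uint8_t ring[MPI_QUEUE * IOMB_SIZE_SPCV];
	struct inbound_q q = { .base = ring, .iomb_size = IOMB_SIZE_SPCV };
	void *slot = msg_free_get(&q);

	memset(slot, 0, q.iomb_size);
	printf("allocated slot at offset %td, PI now %u\n",
	       (uint8_t *)slot - ring, q.producer_idx);
	return 0;
}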
* @payload: the command payload of each operation command. */ -static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, - u32 opCode, void *payload) + u32 opCode, void *payload, u32 responseQueue) { u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; - u32 responseQueue = 0; void *pMessage; - if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { + if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, + &pMessage) < 0) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("No free mpi buffer\n")); return -1; } BUG_ON(!payload); /*Copy to the payload*/ - memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); + memcpy(pMessage, payload, (pm8001_ha->iomb_size - + sizeof(struct mpi_msg_hdr))); /*Build the header*/ Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) @@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, circularQ->pi_offset, circularQ->producer_idx); PM8001_IO_DBG(pm8001_ha, - pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, - circularQ->consumer_index)); + pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", + responseQueue, opCode, circularQ->producer_idx, + circularQ->consumer_index)); return 0; } -static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, struct outbound_queue_table *circularQ, u8 bc) { u32 producer_index; @@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); if (pOutBoundMsgHeader != msgHeader) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("consumer_idx = %d msgHeader = %p\n", @@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, } /** - * mpi_msg_consume- get the MPI message from outbound queue message table. + * pm8001_mpi_msg_consume- get the MPI message from outbound queue + * message table. * @pm8001_ha: our hba card information * @circularQ: the outbound queue table. * @messagePtr1: the message contents of this outbound message. * @pBC: the message size. 
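Editor's note: pm8001_mpi_build_cmd() now takes the response queue number as a parameter and folds it into the IOMB header along with the valid bit, priority, buffer count and opcode. The valid/priority/buffer-count positions below come from the expression visible in the hunk; the response-queue and opcode field positions are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

static uint32_t build_iomb_header(uint32_t opcode, uint8_t bc,
				  uint8_t hpriority, uint8_t response_q)
{
	return (1u << 31)			/* message valid */
	     | ((uint32_t)(hpriority & 0x1) << 30)
	     | ((uint32_t)(bc & 0x1f) << 24)	/* buffer count */
	     | ((uint32_t)(response_q & 0x3f) << 16)	/* assumed position */
	     | (opcode & 0xfff);		/* assumed width */
}

int main(void)
{
	uint32_t hdr = build_iomb_header(0x23, 1, 0, 2);

	printf("header = %#010x\n", hdr);
	return 0;
}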
*/ -static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, struct outbound_queue_table *circularQ, void **messagePtr1, u8 *pBC) { @@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, /*Get the pointer to the circular queue buffer element*/ msgHeader = (struct mpi_msg_hdr *) (circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); /* read header */ header_tmp = pm8001_read_32(msgHeader); msgHeader_tmp = cpu_to_le32(header_tmp); @@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, return MPI_IO_STATUS_BUSY; } -static void pm8001_work_fn(struct work_struct *work) +void pm8001_work_fn(struct work_struct *work) { struct pm8001_work *pw = container_of(work, struct pm8001_work, work); struct pm8001_device *pm8001_dev; @@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work) pm8001_dev = pw->data; /* Most stash device structure */ if ((pm8001_dev == NULL) || ((pw->handler != IO_XFER_ERROR_BREAK) - && (pm8001_dev->dev_type == NO_DEVICE))) { + && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) { kfree(pw); return; } @@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work) } break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: dev = pm8001_dev->sas_device; - pm8001_I_T_nexus_reset(dev); + pm8001_I_T_nexus_event_handler(dev); break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: dev = pm8001_dev->sas_device; @@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work) kfree(pw); } -static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, int handler) { struct pm8001_work *pw; @@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, return ret; } +static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + 
return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + /** * mpi_ssp_completion- process the event that FW response to the SSP request. * @pm8001_ha: our hba card information @@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) break; } PM8001_IO_DBG(pm8001_ha, - pm8001_printk("scsi_status = %x \n ", + pm8001_printk("scsi_status = %x\n ", psspPayload->ssp_resp_iu.status)); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; @@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) status = le32_to_cpu(psataPayload->status); tag = le32_to_cpu(psataPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } ccb = &pm8001_ha->ccb_info[tag]; param = le32_to_cpu(psataPayload->param); - t = ccb->task; + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + ts = &t->task_status; - pm8001_dev = ccb->device; - if (status) + if (!ts) { PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("sata IO status 0x%x\n", status)); - if (unlikely(!t || !t->lldd_task || !t->dev)) + pm8001_printk("ts null\n")); return; + } switch (status) { case IO_SUCCESS: @@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm8001_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } } else { u8 len; ts->resp = 
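Editor's note: on an NCQ error the driver now fabricates the READ LOG EXT command itself: it builds a host-to-device FIS (type 0x27, command ATA_CMD_READ_LOG_EXT, log page 0x10, one sector) and queues it, then uses flag bits stashed in the device id to trigger an abort-all once the log read completes. A minimal sketch of that FIS construction; the struct here is a simplified stand-in for the kernel's host_to_dev_fis.

#include <stdint.h>
#include <stdio.h>

#define ATA_CMD_READ_LOG_EXT 0x2f
#define NCQ_ERROR_LOG_PAGE   0x10

/* Simplified 20-byte register host-to-device FIS. */
struct h2d_fis {
	uint8_t fis_type;	/* 0x27 = register host-to-device */
	uint8_t flags;		/* bit 7 set = command register update */
	uint8_t command;
	uint8_t features;
	uint8_t lbal, lbam, lbah, device;
	uint8_t lbal_exp, lbam_exp, lbah_exp, features_exp;
	uint8_t sector_count, sector_count_exp;
	uint8_t rsvd[6];
};

static void build_read_log_ext(struct h2d_fis *fis)
{
	*fis = (struct h2d_fis){ 0 };
	fis->fis_type = 0x27;
	fis->flags = 0x80;			/* command FIS */
	fis->command = ATA_CMD_READ_LOG_EXT;
	fis->lbal = NCQ_ERROR_LOG_PAGE;		/* NCQ error log */
	fis->sector_count = 1;			/* one 512-byte page */
}

int main(void)
{
	struct h2d_fis fis;

	build_read_log_ext(&fis);
	printf("cmd=%#x page=%#x count=%u\n",
	       fis.command, fis.lbal, fis.sector_count);
	return 0;
}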
SAS_TASK_COMPLETE; @@ -2424,6 +2656,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) unsigned long flags; ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. returning\n")); + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm8001_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; if (event) @@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("port_id = %x,device_id = %x\n", - port_id, dev_id)); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, dev_id, tag, event)); switch (event) { case IO_OVERFLOW: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); @@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static void -mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { struct set_dev_state_resp *pPayload = (struct set_dev_state_resp *)(piomb + 4); @@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct get_nvm_data_resp *pPayload = (struct get_nvm_data_resp *)(piomb + 4); @@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void +pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct fw_control_ex *fw_control_context; struct get_nvm_data_resp *pPayload = @@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct local_phy_ctl_resp *pPayload = (struct local_phy_ctl_resp *)(piomb + 4); @@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) * while receive a broadcast(change) primitive just tell the sas * layer to discover the changed domain rather than the whole domain. 
*/ -static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) { struct pm8001_phy *phy = &pm8001_ha->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) } /* Get the link rate speed */ -static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) { struct sas_phy *sas_phy = phy->sas_phy.phy; @@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame * buffer. */ -static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr) { if (phy->sas_phy.frame_rcvd[0] == 0x34 @@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, ((phyId & 0x0F) << 4) | (port_id & 0x0F)); payload.param0 = cpu_to_le32(param0); payload.param1 = cpu_to_le32(param1); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); } static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, @@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, PHY_NOTIFY_ENABLE_SPINUP); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_EDGE_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_FANOUT_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("fanout expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; default: PM8001_MSG_DBG(pm8001_ha, @@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; phy->phy_attached = 1; phy->sas_phy.oob_mode = SATA_OOB_MODE; @@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) sizeof(struct dev_to_host_fis)); phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; - phy->identify.device_type = SATA_DEV; + phy->identify.device_type = SAS_SATA_DEV; pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); pm8001_bytes_dmaed(pm8001_ha, phy_id); @@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_reg_resp -process register device ID response. + * pm8001_mpi_reg_resp -process register device ID response. * @pm8001_ha: our hba card information * @piomb: IO message buffer * @@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * has assigned, from now,inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. 
*/ -static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +/** + * fw_flash_update_resp - Response from FW for flash update command. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { u32 status; struct fw_control_ex fw_control_context; @@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) break; } ccb->fw_control_context->fw_control->retcode = status; - pci_free_consistent(pm8001_ha->pdev, - fw_control_context.len, - fw_control_context.virtAddr, - fw_control_context.phys_addr); complete(pm8001_ha->nvmd_completion); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; @@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { u32 status; int i; @@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) return 0; } -static int -mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; struct pm8001_ccb_info *ccb; @@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 status ; u32 tag, scp; struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; struct task_abort_resp *pPayload = (struct task_abort_resp *)(piomb + 4); status = le32_to_cpu(pPayload->status); tag = le32_to_cpu(pPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TAG NULL. RETURNING !!!")); + return -1; + } + scp = le32_to_cpu(pPayload->scp); ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk(" status = 0x%x\n", status)); - if (t == NULL) + pm8001_dev = ccb->device; /* retrieve device */ + + if (!t) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TASK NULL. 
RETURNING !!!")); return -1; + } ts = &t->task_status; if (status != 0) PM8001_FAIL_DBG(pm8001_ha, @@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb(); - t->task_done(t); + + if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) { + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + /* clear the flag */ + pm8001_dev->id &= 0xBFFFFFFF; + } else + t->task_done(t); + return 0; } @@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_LOCAL_PHY_CNTRL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); - mpi_local_phy_ctl(pm8001_ha, piomb); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); break; case OPC_OUB_DEV_REGIST: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_REGIST\n")); - mpi_reg_resp(pm8001_ha, piomb); + pm8001_mpi_reg_resp(pm8001_ha, piomb); break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister the device\n")); - mpi_dereg_resp(pm8001_ha, piomb); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: PM8001_MSG_DBG(pm8001_ha, @@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_FW_FLASH_UPDATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); - mpi_fw_flash_update_resp(pm8001_ha, piomb); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); break; case OPC_OUB_GPIO_RESPONSE: PM8001_MSG_DBG(pm8001_ha, @@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_GENERAL_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); - mpi_general_event(pm8001_ha, piomb); + pm8001_mpi_general_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SATA_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SAS_DIAG_MODE_START_END: PM8001_MSG_DBG(pm8001_ha, @@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SMP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); - mpi_get_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_SET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); - mpi_set_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_DEVICE_HANDLE_REMOVAL: PM8001_MSG_DBG(pm8001_ha, @@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); - mpi_set_dev_state_resp(pm8001_ha, piomb); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, @@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static int process_oq(struct 
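Editor's note: the abort-completion path keys off per-device state bits that the driver parks in the upper bits of the device id; the clear masks used here and in the SATA paths (0x7FFFFFFF, 0xBFFFFFFF, 0xDFFFFFFF) imply the read-log, abort-all and second-RLE flags live in bits 31, 30 and 29. The sketch below spells out that bit bookkeeping; the flag values are inferred from the masks, so treat them as an assumption.

#include <stdint.h>
#include <stdio.h>

/* Inferred from the clear masks in the patch (bits 31, 30, 29). */
#define NCQ_READ_LOG_FLAG  0x80000000u
#define NCQ_ABORT_ALL_FLAG 0x40000000u
#define NCQ_2ND_RLE_FLAG   0x20000000u

int main(void)
{
	uint32_t id = 0x00000123;	/* low bits: the real device id */

	/* error recovery armed: read log + second RLE pending */
	id |= NCQ_READ_LOG_FLAG | NCQ_2ND_RLE_FLAG;

	/* read-log completed: promote to abort-all, clear the read-log bit */
	id |= NCQ_ABORT_ALL_FLAG;
	id &= 0x7FFFFFFF;		/* same as ~NCQ_READ_LOG_FLAG */

	/* abort-all completed: clear its bit too */
	id &= 0xBFFFFFFF;		/* same as ~NCQ_ABORT_ALL_FLAG */

	printf("remaining flags: %#x, device id: %#x\n",
	       id & 0xE0000000u, id & 0x1FFFFFFFu);
	return 0;
}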
pm8001_hba_info *pm8001_ha) +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; @@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha) unsigned long flags; spin_lock_irqsave(&pm8001_ha->lock, flags); - circularQ = &pm8001_ha->outbnd_q_tbl[0]; + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; do { - ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); /* free the message from the outbound circular buffer */ - mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); } if (MPI_IO_STATUS_BUSY == ret) { /* Update the producer index from SPC */ @@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = { [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ }; -static void +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) { int i; @@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, smp_cmd.long_smp_req.long_resp_size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); - mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); return 0; err_out_2: @@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0); return ret; } @@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 ATAP = 0x0; u32 dir; struct inbound_queue_table *circularQ; + unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } } - if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); ncg_tag = hdr_tag; + } dir = data_dir_flags[task->data_dir] << 8; sata_cmd.tag = cpu_to_le32(tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); @@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + 
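/*
 * Editor's illustration (stand-alone C, not driver code): for FPDMA
 * (NCQ) commands the queue tag is carried in bits 7:3 of the FIS
 * sector-count byte, which is why the hunk above ORs (hdr_tag << 3)
 * into task->ata_task.fis.sector_count.  The helpers below only
 * round-trip that encoding.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t ncq_tag_pack(uint8_t sector_count, uint8_t tag)
{
        return (uint8_t)(sector_count | ((tag & 0x1f) << 3));
}

static uint8_t ncq_tag_unpack(uint8_t sector_count)
{
        return (sector_count >> 3) & 0x1f;
}

int main(void)
{
        for (uint8_t tag = 0; tag < 32; tag++)
                assert(ncq_tag_unpack(ncq_tag_pack(0, tag)) == tag);
        return 0;
}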
SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); return ret; } @@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); - payload.sas_identify.dev_type = SAS_END_DEV; + payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } @@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) * @num: the inbound queue number * @phy_id: the phy id which we wanted to start up. */ -static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; @@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } /** - * see comments on mpi_reg_resp. + * see comments on pm8001_mpi_reg_resp. */ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 flag) @@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, if (flag == 1) stp_sspsmp_sata = 0x02; /*direct attached sata */ else { - if (pm8001_dev->dev_type == SATA_DEV) + if (pm8001_dev->dev_type == SAS_SATA_DEV) stp_sspsmp_sata = 0x00; /* stp*/ - else if (pm8001_dev->dev_type == SAS_END_DEV || - pm8001_dev->dev_type == EDGE_DEV || - pm8001_dev->dev_type == FANOUT_DEV) + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) stp_sspsmp_sata = 0x01; /*ssp or smp*/ } if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) @@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } /** - * see comments on mpi_reg_resp. 
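/*
 * Editor's illustration (stand-alone C, not driver code): the register
 * device hunk above folds the libsas device type into a 2-bit protocol
 * code (0x02 direct-attached SATA, 0x00 STP, 0x01 SSP/SMP).  The enum
 * below is only a stand-in for enum sas_device_type so this compiles on
 * its own.
 */
#include <assert.h>

enum demo_dev_type {
        DEMO_SAS_END_DEVICE,
        DEMO_SAS_EDGE_EXPANDER_DEVICE,
        DEMO_SAS_FANOUT_EXPANDER_DEVICE,
        DEMO_SAS_SATA_DEV,
};

static unsigned int reg_dev_protocol_code(enum demo_dev_type type,
                                          int direct_attached_sata)
{
        if (direct_attached_sata)
                return 0x02;            /* direct attached SATA */
        if (type == DEMO_SAS_SATA_DEV)
                return 0x00;            /* STP */
        return 0x01;                    /* SSP or SMP */
}

int main(void)
{
        assert(reg_dev_protocol_code(DEMO_SAS_SATA_DEV, 1) == 0x02);
        assert(reg_dev_protocol_code(DEMO_SAS_SATA_DEV, 0) == 0x00);
        assert(reg_dev_protocol_code(DEMO_SAS_END_DEVICE, 0) == 0x01);
        assert(reg_dev_protocol_code(DEMO_SAS_EDGE_EXPANDER_DEVICE, 0) == 0x01);
        return 0;
}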
+ * see comments on pm8001_mpi_reg_resp. */ -static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id) { struct dereg_dev_req payload; @@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(1); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) * @stat: stat. */ static irqreturn_t -pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) { - pm8001_chip_interrupt_disable(pm8001_ha); - process_oq(pm8001_ha); - pm8001_chip_interrupt_enable(pm8001_ha); + pm8001_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm8001_chip_interrupt_enable(pm8001_ha, vec); return IRQ_HANDLED; } @@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); return ret; } @@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, * @task: the task we wanted to aborted. * @flag: the abort flag. */ -static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) { u32 opc, device_id; int rc = TMF_RESP_FUNC_FAILED; - PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" - " = %x", cmd_tag, task_tag)); - if (pm8001_dev->dev_type == SAS_END_DEV) + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("cmd_tag = %x, abort task tag = 0x%x", + cmd_tag, task_tag)); + if (pm8001_dev->dev_type == SAS_END_DEVICE) opc = OPC_INB_SSP_ABORT; - else if (pm8001_dev->dev_type == SATA_DEV) + else if (pm8001_dev->dev_type == SAS_SATA_DEV) opc = OPC_INB_SATA_ABORT; else opc = OPC_INB_SMP_ABORT;/* SMP */ @@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, * @ccb: the ccb information. * @tmf: task management function. 
*/ -static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { struct sas_task *task = ccb->task; @@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); return ret; } -static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_GET_NVMD_DATA; @@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; + fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific; fw_control_context->len = ioctl_payload->length; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&nvmd_req, 0, sizeof(nvmd_req)); @@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } -static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_SET_NVMD_DATA; @@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, return -ENOMEM; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, - ioctl_payload->func_specific, + &ioctl_payload->func_specific, ioctl_payload->length); memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); @@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } @@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information. 
* @fw_flash_updata_info: firmware flash update param */ -static int +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, void *fw_flash_updata_info, u32 tag) { @@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } -static int +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, void *payload) { @@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, int rc; u32 tag; struct pm8001_ccb_info *ccb; - void *buffer = NULL; - dma_addr_t phys_addr; - u32 phys_addr_hi; - u32 phys_addr_lo; + void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; + dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; struct pm8001_ioctl_payload *ioctl_payload = payload; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; - if (fw_control->len != 0) { - if (pm8001_mem_alloc(pm8001_ha->pdev, - (void **)&buffer, - &phys_addr, - &phys_addr_hi, - &phys_addr_lo, - fw_control->len, 0) != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("Mem alloc failure\n")); - kfree(fw_control_context); - return -ENOMEM; - } - } + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); @@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, flash_update_info.total_image_len = fw_control->size; fw_control_context->fw_control = fw_control; fw_control_context->virtAddr = buffer; + fw_control_context->phys_addr = phys_addr; fw_control_context->len = fw_control->len; rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { @@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, return rc; } -static int +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 state) { @@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) payload.SSAHOLT = cpu_to_le32(0xd << 25); payload.sata_hol_tmo = cpu_to_le32(80); payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = { .set_dev_state_req = pm8001_chip_set_dev_state_req, .sas_re_init_req = pm8001_chip_sas_re_initialization, }; - diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index d437309cb1e1..d7c1e2034226 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,8 @@ #define LINKRATE_30 
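/*
 * Editor's illustration (stand-alone C, not driver code): the reworked
 * flash-update request above drops the per-request DMA allocation and
 * instead copies the ioctl image into the 4096-byte FW_FLASH region set
 * up in pm8001_alloc(), recording its address and length in the
 * fw_control context; that is also why the pci_free_consistent() call
 * disappears from the response handler.  Cut-down sketch of that
 * preallocated-buffer shape; every name here is a stand-in.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_FW_FLASH_LEN 4096

struct demo_fw_control_ctx {
        void     *virt_addr;    /* preallocated DMA-able buffer */
        uint64_t  phys_addr;    /* its bus address */
        uint32_t  len;
};

static int demo_prepare_flash_update(struct demo_fw_control_ctx *ctx,
                                     void *region_virt, uint64_t region_phys,
                                     const void *image, uint32_t image_len)
{
        if (image_len > DEMO_FW_FLASH_LEN)
                return -1;      /* image would not fit the fixed region */
        memcpy(region_virt, image, image_len);
        ctx->virt_addr = region_virt;
        ctx->phys_addr = region_phys;
        ctx->len = image_len;
        return 0;
}

int main(void)
{
        static uint8_t region[DEMO_FW_FLASH_LEN];
        struct demo_fw_control_ctx ctx;
        const uint8_t image[] = { 0xde, 0xad, 0xbe, 0xef };

        return demo_prepare_flash_update(&ctx, region, 0x1000, image,
                                         sizeof(image));
}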
(0x02 << 8) #define LINKRATE_60 (0x04 << 8) +/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ +#define GSM_SM_BASE 0x4F0000 struct mpi_msg_hdr{ __le32 header; /* Bits [11:0] - Message operation code */ /* Bits [15:12] - Message Category */ @@ -298,7 +300,7 @@ struct local_phy_ctl_resp { #define OP_BITS 0x0000FF00 -#define ID_BITS 0x0000000F +#define ID_BITS 0x000000FF /* * brief the data structure of PORT Control Command diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 3d5e522e00fc..e4b9bc7f5410 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -44,8 +44,16 @@ static struct scsi_transport_template *pm8001_stt; +/** + * chip info structure to identify chip key functionality as + * encryption available/not, no of ports, hw specific function ref + */ static const struct pm8001_chip_info pm8001_chips[] = { - [chip_8001] = { 8, &pm8001_8001_dispatch,}, + [chip_8001] = {0, 8, &pm8001_8001_dispatch,}, + [chip_8008] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, + [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, }; static int pm8001_id; @@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } #ifdef PM8001_USE_TASKLET + +/** + * tasklet for 64 msi-x interrupt handler + * @opaque: the passed general host adapter struct + * Note: pm8001_tasklet is common for pm8001 & pm80xx + */ static void pm8001_tasklet(unsigned long opaque) { struct pm8001_hba_info *pm8001_ha; + u32 vec; pm8001_ha = (struct pm8001_hba_info *)opaque; if (unlikely(!pm8001_ha)) BUG_ON(1); - PM8001_CHIP_DISP->isr(pm8001_ha); + vec = pm8001_ha->int_vector; + PM8001_CHIP_DISP->isr(pm8001_ha, vec); +} +#endif + +static struct pm8001_hba_info *outq_to_hba(u8 *outq) +{ + return container_of((outq - *outq), struct pm8001_hba_info, outq[0]); } + +/** + * pm8001_interrupt_handler_msix - main MSIX interrupt handler. + * It obtains the vector number and calls the equivalent bottom + * half or services directly. + * @opaque: the passed outbound queue/vector. Host structure is + * retrieved from the same. + */ +static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) +{ + struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); + u8 outq = *(u8 *)opaque; + irqreturn_t ret = IRQ_HANDLED; + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) + return IRQ_NONE; + pm8001_ha->int_vector = outq; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); #endif + return ret; +} +/** + * pm8001_interrupt_handler_intx - main INTx interrupt handler. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + */ - /** - * pm8001_interrupt - when HBA originate a interrupt,we should invoke this - * dispatcher to handle each case. - * @irq: irq number. 
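/*
 * Editor's illustration (stand-alone C, not driver code): each MSI-X
 * vector above is registered with &pm8001_ha->outq[i] as its cookie,
 * and outq[i] stores the value i.  Subtracting that stored index from
 * the cookie pointer gives &outq[0], so a container_of() on it recovers
 * the owning hba, which is all outq_to_hba() does.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_MAX_VEC 64

#define demo_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_hba {
        int id;
        uint8_t outq[DEMO_MAX_VEC];
};

static struct demo_hba *demo_outq_to_hba(uint8_t *outq)
{
        /* outq points at outq[*outq], so outq - *outq == &outq[0] */
        return demo_container_of(outq - *outq, struct demo_hba, outq[0]);
}

int main(void)
{
        struct demo_hba hba = { .id = 7 };

        for (int i = 0; i < DEMO_MAX_VEC; i++)
                hba.outq[i] = (uint8_t)i;

        uint8_t *cookie = &hba.outq[5];   /* dev_id of vector 5 */
        assert(demo_outq_to_hba(cookie) == &hba);
        assert(*cookie == 5);             /* the vector number itself */
        return 0;
}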
- * @opaque: the passed general host adapter struct - */ -static irqreturn_t pm8001_interrupt(int irq, void *opaque) +static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) { struct pm8001_hba_info *pm8001_ha; irqreturn_t ret = IRQ_HANDLED; - struct sas_ha_struct *sha = opaque; + struct sas_ha_struct *sha = dev_id; pm8001_ha = sha->lldd_ha; if (unlikely(!pm8001_ha)) return IRQ_NONE; if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; + + pm8001_ha->int_vector = 0; #ifdef PM8001_USE_TASKLET tasklet_schedule(&pm8001_ha->tasklet); #else - ret = PM8001_CHIP_DISP->isr(pm8001_ha); + ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); #endif return ret; } @@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque) * @pm8001_ha:our hba structure. * */ -static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) +static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, + const struct pci_device_id *ent) { int i; spin_lock_init(&pm8001_ha->lock); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("pm8001_alloc: PHY:%x\n", + pm8001_ha->chip->n_phy)); for (i = 0; i < pm8001_ha->chip->n_phy; i++) { pm8001_phy_init(pm8001_ha, i); pm8001_ha->port[i].wide_port_phymap = 0; @@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; pm8001_ha->memoryMap.region[IOP].alignment = 32; - /* MPI Memory region 3 for consumer Index of inbound queues */ - pm8001_ha->memoryMap.region[CI].num_elements = 1; - pm8001_ha->memoryMap.region[CI].element_size = 4; - pm8001_ha->memoryMap.region[CI].total_len = 4; - pm8001_ha->memoryMap.region[CI].alignment = 4; - - /* MPI Memory region 4 for producer Index of outbound queues */ - pm8001_ha->memoryMap.region[PI].num_elements = 1; - pm8001_ha->memoryMap.region[PI].element_size = 4; - pm8001_ha->memoryMap.region[PI].total_len = 4; - pm8001_ha->memoryMap.region[PI].alignment = 4; - - /* MPI Memory region 5 inbound queues */ - pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[IB].element_size = 64; - pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[IB].alignment = 64; - - /* MPI Memory region 6 outbound queues */ - pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[OB].element_size = 64; - pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[OB].alignment = 64; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[CI+i].num_elements = 1; + pm8001_ha->memoryMap.region[CI+i].element_size = 4; + pm8001_ha->memoryMap.region[CI+i].total_len = 4; + pm8001_ha->memoryMap.region[CI+i].alignment = 4; + + if ((ent->driver_data) != chip_8001) { + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 128; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[IB+i].alignment = 128; + } else { + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 64; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[IB+i].alignment = 64; + } + } + + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + /* MPI Memory region 4 for producer Index of 
outbound queues */ + pm8001_ha->memoryMap.region[PI+i].num_elements = 1; + pm8001_ha->memoryMap.region[PI+i].element_size = 4; + pm8001_ha->memoryMap.region[PI+i].total_len = 4; + pm8001_ha->memoryMap.region[PI+i].alignment = 4; + + if (ent->driver_data != chip_8001) { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 128; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[OB+i].alignment = 128; + } else { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 64; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[OB+i].alignment = 64; + } + } /* Memory region write DMA*/ pm8001_ha->memoryMap.region[NVMD].num_elements = 1; pm8001_ha->memoryMap.region[NVMD].element_size = 4096; @@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * sizeof(struct pm8001_ccb_info); + /* Memory region for fw flash */ + pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + for (i = 0; i < USI_MAX_MEMCNT; i++) { if (pm8001_mem_alloc(pm8001_ha->pdev, &pm8001_ha->memoryMap.region[i].virt_ptr, @@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; for (i = 0; i < PM8001_MAX_DEVICES; i++) { - pm8001_ha->devices[i].dev_type = NO_DEVICE; + pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; pm8001_ha->devices[i].id = i; pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; pm8001_ha->devices[i].running_req = 0; @@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) ioremap(pm8001_ha->io_mem[logicalBar].membase, pm8001_ha->io_mem[logicalBar].memsize); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("PCI: bar %d, logicalBar %d " - "virt_addr=%lx,len=%d\n", bar, logicalBar, - (unsigned long) - pm8001_ha->io_mem[logicalBar].memvirtaddr, + pm8001_printk("PCI: bar %d, logicalBar %d ", + bar, logicalBar)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "base addr %llx virt_addr=%llx len=%d\n", + (u64)pm8001_ha->io_mem[logicalBar].membase, + (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr, pm8001_ha->io_mem[logicalBar].memsize)); } else { pm8001_ha->io_mem[logicalBar].membase = 0; @@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) * @shost: scsi host struct which has been initialized before. 
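/*
 * Editor's illustration (stand-alone C, not driver code): the queue
 * sizing above reduces to "SPCv-class chips use 128-byte IOMB elements,
 * the original SPC uses 64-byte ones", with total_len = depth * element
 * size.  The real PM8001_MPI_QUEUE depth is not reproduced here;
 * DEMO_QUEUE_DEPTH is a stand-in.
 */
#include <stdio.h>

#define DEMO_QUEUE_DEPTH 1024U          /* stand-in for PM8001_MPI_QUEUE */

enum demo_chip { DEMO_CHIP_8001, DEMO_CHIP_80XX };

static unsigned int demo_iomb_element_size(enum demo_chip chip)
{
        return chip == DEMO_CHIP_8001 ? 64 : 128;
}

int main(void)
{
        for (int chip = DEMO_CHIP_8001; chip <= DEMO_CHIP_80XX; chip++) {
                unsigned int esz = demo_iomb_element_size((enum demo_chip)chip);

                printf("chip %d: %u-byte IOMB, %u bytes per queue\n",
                       chip, esz, esz * DEMO_QUEUE_DEPTH);
        }
        return 0;
}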
*/ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, - u32 chip_id, - struct Scsi_Host *shost) + const struct pci_device_id *ent, + struct Scsi_Host *shost) + { struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); @@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->pdev = pdev; pm8001_ha->dev = &pdev->dev; - pm8001_ha->chip_id = chip_id; + pm8001_ha->chip_id = ent->driver_data; pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; pm8001_ha->irq = pdev->irq; pm8001_ha->sas = sha; @@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->id = pm8001_id++; pm8001_ha->logging_level = 0x01; sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); + /* IOMB size is 128 for 8088/89 controllers */ + if (pm8001_ha->chip_id != chip_8001) + pm8001_ha->iomb_size = IOMB_SIZE_SPCV; + else + pm8001_ha->iomb_size = IOMB_SIZE_SPC; + #ifdef PM8001_USE_TASKLET + /** + * default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler + **/ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + (unsigned long)pm8001_ha); #endif pm8001_ioremap(pm8001_ha); - if (!pm8001_alloc(pm8001_ha)) + if (!pm8001_alloc(pm8001_ha, ent)) return pm8001_ha; pm8001_free(pm8001_ha); return NULL; @@ -512,21 +605,50 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) { - u8 i; + u8 i, j; #ifdef PM8001_READ_VPD + /* For new SPC controllers WWN is stored in flash vpd + * For SPC/SPCve controllers WWN is stored in EEPROM + * For Older SPC WWN is stored in NVMD + */ DECLARE_COMPLETION_ONSTACK(completion); struct pm8001_ioctl_payload payload; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); pm8001_ha->nvmd_completion = &completion; - payload.minor_function = 0; - payload.length = 128; - payload.func_specific = kzalloc(128, GFP_KERNEL); + + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) { + payload.minor_function = 4; + payload.length = 4096; + } else { + payload.minor_function = 0; + payload.length = 128; + } + } else { + payload.minor_function = 1; + payload.length = 4096; + } + payload.offset = 0; + payload.func_specific = kzalloc(payload.length, GFP_KERNEL); PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); + + for (i = 0, j = 0; i <= 7; i++, j++) { + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x704 + i]; + } else + pm8001_ha->sas_addr[j] = + payload.func_specific[0x804 + i]; + } + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { - memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, - SAS_ADDR_SIZE); + memcpy(&pm8001_ha->phy[i].dev_sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("phy %d sas_addr = %016llx \n", i, + pm8001_printk("phy %d sas_addr = %016llx\n", i, pm8001_ha->phy[i].dev_sas_addr)); } #else @@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) * @chip_info: our ha struct. 
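/*
 * Editor's illustration (stand-alone C, not driver code): the VPD
 * parsing above copies the 8-byte SAS address out of the returned NVMD
 * buffer byte by byte, at offset 0x704 for the 0x8081 SPC flavour and
 * 0x804 for SPCv/ve parts.  The byte loop is equivalent to the memcpy()
 * below; the offsets come from the hunk, all other names are stand-ins.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_SAS_ADDR_SIZE 8

static void demo_copy_wwn(uint8_t *sas_addr, const uint8_t *func_specific,
                          int is_spc_8081)
{
        size_t off = is_spc_8081 ? 0x704 : 0x804;

        memcpy(sas_addr, func_specific + off, DEMO_SAS_ADDR_SIZE);
}

int main(void)
{
        uint8_t vpd[0x900] = { 0 };
        uint8_t wwn[DEMO_SAS_ADDR_SIZE];

        memcpy(vpd + 0x804, "\x50\x01\x04\xf0\x00\x00\x00\x01", 8);
        demo_copy_wwn(wwn, vpd, 0);
        return wwn[0] == 0x50 ? 0 : 1;
}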
* @irq_handler: irq_handler */ -static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, - irq_handler_t irq_handler) +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) { u32 i = 0, j = 0; - u32 number_of_intr = 1; + u32 number_of_intr; int flag = 0; u32 max_entry; int rc; + static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; + + /* SPCv controllers supports 64 msi-x */ + if (pm8001_ha->chip_id == chip_8001) { + number_of_intr = 1; + flag |= IRQF_DISABLED; + } else { + number_of_intr = PM8001_MAX_MSIX_VEC; + flag &= ~IRQF_SHARED; + flag |= IRQF_DISABLED; + } + max_entry = sizeof(pm8001_ha->msix_entries) / sizeof(pm8001_ha->msix_entries[0]); - flag |= IRQF_DISABLED; for (i = 0; i < max_entry ; i++) pm8001_ha->msix_entries[i].entry = i; rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; if (!rc) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "pci_enable_msix request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr)); + + for (i = 0; i < number_of_intr; i++) + pm8001_ha->outq[i] = i; + for (i = 0; i < number_of_intr; i++) { + snprintf(intr_drvname[i], sizeof(intr_drvname[0]), + DRV_NAME"%d", i); if (request_irq(pm8001_ha->msix_entries[i].vector, - irq_handler, flag, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost))) { + pm8001_interrupt_handler_msix, flag, + intr_drvname[i], &pm8001_ha->outq[i])) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + &pm8001_ha->outq[j]); pci_disable_msix(pm8001_ha->pdev); break; } @@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { struct pci_dev *pdev; - irq_handler_t irq_handler = pm8001_interrupt; int rc; pdev = pm8001_ha->pdev; #ifdef PM8001_USE_MSIX if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - return pm8001_setup_msix(pm8001_ha, irq_handler); - else + return pm8001_setup_msix(pm8001_ha); + else { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MSIX not supported!!!\n")); goto intx; + } #endif intx: /* initialize the INT-X interrupt */ - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); return rc; } @@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev, { unsigned int rc; u32 pci_reg; + u8 i = 0; struct pm8001_hba_info *pm8001_ha; struct Scsi_Host *shost = NULL; const struct pm8001_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, - "pm8001: driver version %s\n", DRV_VERSION); + "pm80xx: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; @@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev, goto err_out_free; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); - pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); + /* ent->driver variable is used to differentiate between controllers */ + pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); if (!pm8001_ha) { rc = -ENOMEM; goto err_out_free; } list_add_tail(&pm8001_ha->list, &hba_list); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "chip_init failed [ret: %d]\n", rc)); goto err_out_ha_free; + } rc = scsi_add_host(shost, 
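/*
 * Editor's illustration (stand-alone C, not driver code): the MSI-X
 * setup above gives every vector its own irq name by printing DRV_NAME
 * plus the vector index into a static buffer of sizeof(DRV_NAME)+3
 * bytes.  With DRV_NAME now "pm80xx", that is 10 bytes, enough for the
 * 64 vectors the SPCv supports ("pm80xx63" plus NUL is 9).
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define DEMO_DRV_NAME     "pm80xx"
#define DEMO_MAX_MSIX_VEC 64            /* stand-in for PM8001_MAX_MSIX_VEC */

int main(void)
{
        static char name[DEMO_MAX_MSIX_VEC][sizeof(DEMO_DRV_NAME) + 3];

        for (int i = 0; i < DEMO_MAX_MSIX_VEC; i++) {
                int n = snprintf(name[i], sizeof(name[0]),
                                 DEMO_DRV_NAME "%d", i);

                assert(n > 0 && (size_t)n < sizeof(name[0])); /* never truncated */
        }
        assert(strcmp(name[0], "pm80xx0") == 0);
        assert(strcmp(name[63], "pm80xx63") == 0);
        return 0;
}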
&pdev->dev); if (rc) goto err_out_ha_free; rc = pm8001_request_irq(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "pm8001_request_irq failed [ret: %d]\n", rc)); goto err_out_shost; + } + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + /* setup thermal configuration. */ + pm80xx_set_thermal_config(pm8001_ha); + } - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); pm8001_init_sas_add(pm8001_ha); pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); @@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev) sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); scsi_remove_host(pm8001_ha->shost); - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) printk(KERN_ERR " PCI PM not supported\n"); return -ENODEV; } - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; + u8 i = 0; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; @@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev) if (rc) goto err_out_disable; - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + /* chip soft rst only for spc */ + if (pm8001_ha->chip_id == chip_8001) { + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip soft reset successful\n")); + } rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); if (rc) goto err_out_disable; - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + + /* disable all the interrupt bits */ + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + rc = pm8001_request_irq(pm8001_ha); if (rc) goto err_out_disable; - #ifdef PM8001_USE_TASKLET +#ifdef PM8001_USE_TASKLET + /* default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler */ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); - #endif - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); + (unsigned long)pm8001_ha); +#endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + } 
scsi_unblock_requests(pm8001_ha->shost); return 0; @@ -843,14 +1018,45 @@ err_out_enable: return rc; } +/* update of pci device, vendor id and driver data with + * unique value for each of the controller + */ static struct pci_device_id pm8001_pci_table[] = { - { - PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 - }, + { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, { PCI_DEVICE(0x117c, 0x0042), .driver_data = chip_8001 }, + /* Support for SPC/SPCv/SPCve controllers */ + { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, + { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 }, + { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 }, + { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 }, + { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 }, + { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, + { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, + { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, {} /* terminate list */ }; @@ -870,7 +1076,7 @@ static int __init pm8001_init(void) { int rc = -ENOMEM; - pm8001_wq = alloc_workqueue("pm8001", 0, 0); + pm8001_wq = alloc_workqueue("pm80xx", 0, 0); if (!pm8001_wq) goto err; @@ -902,7 +1108,8 @@ module_init(pm8001_init); module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); -MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); +MODULE_DESCRIPTION( + "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pm8001_pci_table); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index b961112395d5..a85d73de7c80 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) clear_bit(tag, bitmap); } -static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { pm8001_tag_clear(pm8001_ha, tag); } @@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_GET_EVENTS: spin_lock_irqsave(&pm8001_ha->lock, flags); - if (-1 == pm8001_bar4_shift(pm8001_ha, + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, (phy_id < 4) ? 
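/*
 * Editor's illustration (stand-alone C, not driver code): every entry
 * in the PCI id table above carries a chip_80xx enumerator in
 * driver_data, and probe uses it to index pm8001_chips[] for the
 * per-chip capabilities (encryption support, phy count, dispatch ops).
 * The values below are the ones listed earlier in pm8001_init.c; the
 * dispatch pointer is left out of the sketch.
 */
#include <stdio.h>

enum demo_chip { chip_8001, chip_8008, chip_8009, chip_8018, chip_8019 };

struct demo_chip_info {
        unsigned int encrypt;           /* 1 = encryption-capable part */
        unsigned int n_phy;
};

static const struct demo_chip_info demo_chips[] = {
        [chip_8001] = { 0, 8  },
        [chip_8008] = { 0, 8  },
        [chip_8009] = { 1, 8  },
        [chip_8018] = { 0, 16 },
        [chip_8019] = { 1, 16 },
};

int main(void)
{
        unsigned long driver_data = chip_8019;  /* as matched from the table */
        const struct demo_chip_info *chip = &demo_chips[driver_data];

        printf("chip %lu: %u phys, encrypt=%u\n",
               driver_data, chip->n_phy, chip->encrypt);
        return 0;
}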
0x30000 : 0x40000)) { - spin_unlock_irqrestore(&pm8001_ha->lock, flags); - return -EINVAL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -EINVAL; + } } { struct sas_phy *phy = sas_phy->phy; @@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, phy->loss_of_dword_sync_count = qp[3]; phy->phy_reset_problem_count = qp[4]; } - pm8001_bar4_shift(pm8001_ha, 0); + if (pm8001_ha->chip_id == chip_8001) + pm8001_bar4_shift(pm8001_ha, 0); spin_unlock_irqrestore(&pm8001_ha->lock, flags); return 0; default: @@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; - PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); + /* SAS_RE_INITIALIZATION not available in SPCv/ve */ + if (pm8001_ha->chip_id == chip_8001) + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } @@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev) * @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ - ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) + ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) static int pm8001_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { @@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, struct task_status_struct *tsm = &t->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) t->task_done(t); return 0; } @@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { + if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { pm8001_ha->devices[dev].id = dev; return &pm8001_ha->devices[dev]; } @@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) } return NULL; } +/** + * pm8001_find_dev - find a matching pm8001_device + * @pm8001_ha: our hba card information + */ +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].device_id == device_id) + return &pm8001_ha->devices[dev]; + } + if (dev == PM8001_MAX_DEVICES) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING " + "DEVICE FOUND !!!\n")); + } + return NULL; +} static void pm8001_free_dev(struct pm8001_device *pm8001_dev) { u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); pm8001_dev->id = id; - pm8001_dev->dev_type = NO_DEVICE; + pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; } @@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) res = -1; } } else { - if (dev->dev_type == SATA_DEV) { + if (dev->dev_type == SAS_SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; flag = 1; /* directly sata*/ @@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) msleep(50); 
pm8001_ha->flags = PM8001F_RUN_TIME; return 0; @@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -static void pm8001_task_done(struct sas_task *task) +void pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; @@ -904,7 +927,7 @@ void pm8001_open_reject_retry( struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; pm8001_dev = ccb->device; - if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) + if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)) continue; if (!device_to_close) { uintptr_t d = (uintptr_t)pm8001_dev @@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev) return rc; } +/* +* This function handle the IT_NEXUS_XXX event or completion +* status code for SSP/SATA/SMP I/O request. +*/ +int pm8001_I_T_nexus_event_handler(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + u32 device_id = 0; + + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + device_id = pm8001_dev->device_id; + pm8001_ha = pm8001_find_ha_by_dev(dev); + + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("I_T_Nexus handler invoked !!")); + + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + pm8001_dev->setds_completion = &completion_setstate; + + wait_for_completion(&completion_setstate); + } else { + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc)); +out: + sas_put_local_phy(phy); + + return rc; +} /* mandatory SAM-3, the task reset the specified LUN*/ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) { diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 11008205aeb3..570819464d90 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -57,8 +57,8 @@ #include <linux/atomic.h> #include "pm8001_defs.h" -#define DRV_NAME "pm8001" -#define DRV_VERSION "0.1.36" +#define DRV_NAME "pm80xx" +#define DRV_VERSION "0.1.37" #define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ #define PM8001_INIT_LOGGING 0x02 /* driver init logging */ #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ @@ -66,8 +66,8 @@ #define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ #define PM8001_MSG_LOGGING 0x40 /* misc message logging */ -#define pm8001_printk(format, arg...) 
printk(KERN_INFO "%s %d:" format,\ - __func__, __LINE__, ## arg) +#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \ + format, __func__, __LINE__, ## arg) #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ do { \ if (unlikely(HBA->logging_level & LEVEL)) \ @@ -103,11 +103,12 @@ do { \ #define PM8001_READ_VPD -#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) +#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define PM8001_NAME_LENGTH 32/* generic length of strings */ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; +extern const struct pm8001_dispatch pm8001_80xx_dispatch; struct pm8001_hba_info; struct pm8001_ccb_info; @@ -131,15 +132,15 @@ struct pm8001_ioctl_payload { struct pm8001_dispatch { char *name; int (*chip_init)(struct pm8001_hba_info *pm8001_ha); - int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha); void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); - irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec); u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); - int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec); void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); int (*smp_req)(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb); @@ -173,6 +174,7 @@ struct pm8001_dispatch { }; struct pm8001_chip_info { + u32 encrypt; u32 n_phy; const struct pm8001_dispatch *dispatch; }; @@ -204,7 +206,7 @@ struct pm8001_phy { }; struct pm8001_device { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct domain_device *sas_device; u32 attached_phy; u32 id; @@ -256,7 +258,20 @@ struct mpi_mem_req { struct mpi_mem region[USI_MAX_MEMCNT]; }; -struct main_cfg_table { +struct encrypt { + u32 cipher_mode; + u32 sec_mode; + u32 status; + u32 flag; +}; + +struct sas_phy_attribute_table { + u32 phystart1_16[16]; + u32 outbound_hw_event_pid1_16[16]; +}; + +union main_cfg_table { + struct { u32 signature; u32 interface_rev; u32 firmware_rev; @@ -292,19 +307,69 @@ struct main_cfg_table { u32 fatal_err_dump_length1; u32 hda_mode_flag; u32 anolog_setup_table_offset; + u32 rsvd[4]; + } pm8001_tbl; + + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 rsvd[8]; + u32 crc_core_dump; + u32 rsvd1; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_severity; + u32 upper_pcs_event_log_addr; + u32 lower_pcs_event_log_addr; + u32 pcs_event_log_size; + u32 pcs_event_log_severity; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 gpio_led_mapping; + u32 analog_setup_table_offset; + u32 int_vec_table_offset; + u32 
phy_attr_table_offset; + u32 port_recovery_timer; + u32 interrupt_reassertion_delay; + } pm80xx_tbl; }; -struct general_status_table { + +union general_status_table { + struct { u32 gst_len_mpistate; u32 iq_freeze_state0; u32 iq_freeze_state1; u32 msgu_tcnt; u32 iop_tcnt; - u32 reserved; + u32 rsvd; u32 phy_state[8]; - u32 reserved1; - u32 reserved2; - u32 reserved3; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm8001_tbl; + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd[9]; + u32 gpio_input_val; + u32 rsvd1[2]; u32 recover_err_info[8]; + } pm80xx_tbl; }; struct inbound_queue_table { u32 element_pri_size_cnt; @@ -351,15 +416,21 @@ struct pm8001_hba_info { struct device *dev; struct pm8001_hba_memspace io_mem[6]; struct mpi_mem_req memoryMap; + struct encrypt encrypt_info; /* support encryption */ void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ - struct main_cfg_table main_cfg_tbl; - struct general_status_table gs_tbl; - struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; - struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + void __iomem *pspa_q_tbl_addr; + /*MPI SAS PHY attributes Queue Config Table Addr*/ + void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + union main_cfg_table main_cfg_tbl; + union general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM]; + struct sas_phy_attribute_table phy_attr_table; + /* MPI SAS PHY attributes */ u8 sas_addr[SAS_ADDR_SIZE]; struct sas_ha_struct *sas;/* SCSI/SAS glue */ struct Scsi_Host *shost; @@ -372,10 +443,12 @@ struct pm8001_hba_info { struct pm8001_port port[PM8001_MAX_PHYS]; u32 id; u32 irq; + u32 iomb_size; /* SPC and SPCV IOMB size */ struct pm8001_device *devices; struct pm8001_ccb_info *ccb_info; #ifdef PM8001_USE_MSIX - struct msix_entry msix_entries[16];/*for msi-x interrupt*/ + struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC]; + /*for msi-x interrupt*/ int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET @@ -383,7 +456,10 @@ struct pm8001_hba_info { #endif u32 logging_level; u32 fw_status; + u32 smp_exp_mode; + u32 int_vector; const struct firmware *fw_image; + u8 outq[PM8001_MAX_MSIX_VEC]; }; struct pm8001_work { @@ -419,6 +495,9 @@ struct pm8001_fw_image_header { #define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 #define FLASH_UPDATE_DISABLED 0x11 +#define NCQ_READ_LOG_FLAG 0x80000000 +#define NCQ_ABORT_ALL_FLAG 0x40000000 +#define NCQ_2ND_RLE_FLAG 0x20000000 /** * brief param structure for firmware flash update. 
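/*
 * Editor's illustration (stand-alone C, not driver code): main_cfg_table
 * becomes a union of the legacy SPC layout and the new SPCv layout, so
 * pm8001_hba_info keeps a single member and each code path simply picks
 * .pm8001_tbl or .pm80xx_tbl by chip id.  The cut-down structs below
 * carry only a few of the fields to show the shape; the field values in
 * main() are arbitrary demo numbers.
 */
#include <stdint.h>
#include <stdio.h>

union demo_main_cfg_table {
        struct {
                uint32_t signature;
                uint32_t interface_rev;
                uint32_t anolog_setup_table_offset;     /* SPC layout */
        } pm8001_tbl;
        struct {
                uint32_t signature;
                uint32_t interface_rev;
                uint32_t int_vec_table_offset;          /* SPCv layout */
                uint32_t phy_attr_table_offset;
        } pm80xx_tbl;
};

int main(void)
{
        union demo_main_cfg_table tbl = { .pm8001_tbl = { 0, 0, 0 } };
        int is_spcv = 1;                /* e.g. chip_id != chip_8001 */

        if (is_spcv)
                tbl.pm80xx_tbl.phy_attr_table_offset = 0x110;
        else
                tbl.pm8001_tbl.anolog_setup_table_offset = 0x90;

        printf("one member, %zu bytes, two layouts\n", sizeof(tbl));
        return 0;
}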
*/ @@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev); void pm8001_dev_gone(struct domain_device *dev); int pm8001_lu_reset(struct domain_device *dev, u8 *lun); int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_I_T_nexus_event_handler(struct domain_device *dev); int pm8001_query_task(struct sas_task *task); void pm8001_open_reject_retry( struct pm8001_hba_info *pm8001_ha, @@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align); +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + struct inbound_queue_table *circularQ, + u32 opCode, void *payload, u32 responseQueue); +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr); +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc); +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC); +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload); +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag); +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, + struct pm8001_tmf_task *tmf); +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, + u8 flag, u32 task_tag, u32 cmd_tag); +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id); +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd); +void pm8001_work_fn(struct work_struct *work); +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, + void *data, int handler); +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate); +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr); +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i); +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb); +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +struct sas_task *pm8001_alloc_task(void); +void pm8001_task_done(struct sas_task *task); +void pm8001_free_task(struct sas_task *task); +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id); +int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); + int pm8001_bar4_shift(struct pm8001_hba_info 
*pm8001_ha, u32 shiftValue); /* ctl shared API */ diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c new file mode 100644 index 000000000000..302514d8157b --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -0,0 +1,4130 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 PMC-Sierra, Inc., + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include <linux/slab.h> + #include "pm8001_sas.h" + #include "pm80xx_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + +#define SMP_DIRECT 1 +#define SMP_INDIRECT 2 +/** + * read_main_config_table - read the configure table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature = + pm8001_mr32(address, MAIN_SIGNATURE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev = + pm8001_mr32(address, MAIN_INTERFACE_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev = + pm8001_mr32(address, MAIN_FW_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io = + pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl = + pm8001_mr32(address, MAIN_MAX_SGL_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag = + pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset = + pm8001_mr32(address, MAIN_GST_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); + + /* read GPIO LED settings from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping = + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset = + pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset = + pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET); +} + +/** + * read_general_status_table - read the general status table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate = + pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt = + pm8001_mr32(address, GST_MSGUTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt = + pm8001_mr32(address, GST_IOPTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val = + pm8001_mr32(address, GST_GPIO_INPUT_VAL); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] = + pm8001_mr32(address, GST_RERRINFO_OFFSET0); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] = + pm8001_mr32(address, GST_RERRINFO_OFFSET1); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] = + pm8001_mr32(address, GST_RERRINFO_OFFSET2); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] = + pm8001_mr32(address, GST_RERRINFO_OFFSET3); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] = + pm8001_mr32(address, GST_RERRINFO_OFFSET4); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] = + pm8001_mr32(address, GST_RERRINFO_OFFSET5); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] = + pm8001_mr32(address, GST_RERRINFO_OFFSET6); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] = + pm8001_mr32(address, GST_RERRINFO_OFFSET7); +} +/** + * read_phy_attr_table - read the phy attribute table and save it. + * @pm8001_ha: our hba card information + */ +static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->pspa_q_tbl_addr; + pm8001_ha->phy_attr_table.phystart1_16[0] = + pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[1] = + pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[2] = + pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[3] = + pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[4] = + pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[5] = + pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[6] = + pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[7] = + pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[8] = + pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[9] = + pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[10] = + pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[11] = + pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[12] = + pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[13] = + pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[14] = + pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[15] = + pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET); + + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET); + 
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET); + +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + IB_PIPCI_BAR))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + OB_CIPCI_BAR))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET)); + } +} + +/** + * init_default_table_values - init the default table. 
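+ * A short note on the queue descriptor words built below (bit roles are
+ * assumptions read off the shifts, not taken from a separate spec): bits
+ * 0..15 carry the element count (PM8001_MPI_QUEUE), bits 16..29 the element
+ * size in bytes (64), and bit 30 is 0 for the inbound queues and 1 for the
+ * outbound queues, i.e. roughly
+ *
+ *   val = PM8001_MPI_QUEUE | (64 << 16) | (direction << 30);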
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[IB + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[CI + i].virt_ptr; + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[OB + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; + /* interrupt vector based on oq */ + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[PI + i].virt_ptr; + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; 
+ } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); + pm8001_mw32(address, MAIN_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); + + /* SPCv specific */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF; + /* Set GPIOLED to 0x2 for LED indicator */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; + pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); + pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + IB_PROPERITY_OFFSET, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. 
+ * @pm8001_ha: our hba card information + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + OB_PROPERITY_OFFSET, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * mpi_init_check - check firmware initialization status. + * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) + return -1; + /* check the MPI-State for initialization upto 100ms*/ + max_wait_count = 100 * 1000;/* 100 msec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + } while ((GST_MPI_STATE_INIT != + (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); + if (!max_wait_count) + return -1; + + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. 
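+ * The body below repeats one polling pattern four times: read
+ * MSGU_SCRATCH_PAD_1 every microsecond until a readiness bit (ILA, RAAE,
+ * IOP0 and, on 16-port parts, IOP1) is set or the per-stage budget runs
+ * out. A hypothetical helper capturing that pattern (name and signature
+ * are illustrative only, not part of this driver) might look like:
+ *
+ *   static int poll_scratch_pad1(struct pm8001_hba_info *pm8001_ha,
+ *                                u32 ready_bit, u32 usec_budget)
+ *   {
+ *           u32 value;
+ *
+ *           do {
+ *                   udelay(1);
+ *                   value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ *           } while (((value & ready_bit) != ready_bit) && --usec_budget);
+ *           return usec_budget ? 0 : -1;
+ *   }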
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; + u32 max_wait_count; + u32 max_wait_time; + int ret = 0; + + /* reset / PCIe ready */ + max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while ((value == 0xFFFFFFFF) && (--max_wait_count)); + + /* check ila status */ + max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_ILA_READY) != + SCRATCH_PAD_ILA_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" ila ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check RAAE status */ + max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_RAAE_READY) != + SCRATCH_PAD_RAAE_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" raae ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop0 status */ + max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) && + (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" iop0 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop1 status only for 16 port controllers */ + if ((pm8001_ha->chip_id != chip_8008) && + (pm8001_ha->chip_id != chip_8009)) { + /* 200 milli sec */ + max_wait_time = max_wait_count = 200 * 1000; + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP1_READY) != + SCRATCH_PAD_IOP1_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "iop1 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + } + + return ret; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */ + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n", + offset, value)); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar)); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) & + 0xFFFFFF); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) & + 0xFFFFFF); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) & + 0xFFFFFF); + pm8001_ha->ivt_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) & + 0xFFFFFF); + pm8001_ha->pspa_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & + 0xFFFFFF); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GST 
OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("INBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("OBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("IVT OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PSPA OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x90))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - main cfg %p general status %p\n", + pm8001_ha->main_cfg_tbl_addr, + pm8001_ha->general_stat_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - inbnd %p obnd %p\n", + pm8001_ha->inbnd_q_tbl_addr, + pm8001_ha->outbnd_q_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - pspa %p ivt %p\n", + pm8001_ha->pspa_q_tbl_addr, + pm8001_ha->ivt_tbl_addr)); +} + +/** + * pm80xx_set_thermal_config - support the thermal configuration + * @pm8001_ha: our hba card information. + */ +int +pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | THERMAL_OP_CODE; + payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return rc; + +} + +/** +* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol +* Timer configuration page +* @pm8001_ha: our hba card information. 
+*/ +static int +pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + SASProtocolTimerConfig_t SASConfigPage; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + + SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; + SASConfigPage.MST_MSI = 3 << 15; + SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; + SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; + SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; + + if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) + SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; + + + SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | + SAS_OPNRJT_RTRY_INTVL; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) + | SAS_COPNRJT_RTRY_TMO; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) + | SAS_COPNRJT_RTRY_THR; + SASConfigPage.MAX_AIP = SAS_MAX_AIP; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.pageCode " + "0x%08x\n", SASConfigPage.pageCode)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.MST_MSI " + " 0x%08x\n", SASConfigPage.MST_MSI)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO " + " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_FRM_TMO " + " 0x%08x\n", SASConfigPage.STP_FRM_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_IDLE_TMO " + " 0x%08x\n", SASConfigPage.STP_IDLE_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL " + " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP " + " 0x%08x\n", SASConfigPage.MAX_AIP)); + + memcpy(&payload.cfg_pg, &SASConfigPage, + sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_get_encrypt_info - Check for encryption + * @pm8001_ha: our hba card information. 
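+ *
+ * The checks below decode MSGU_SCRATCH_PAD_3: the SCRATCH_PAD3_ENC_MASK
+ * field selects the overall state (ready, disabled, or a disable/enable
+ * error), SCRATCH_PAD3_XTS_ENABLED reports the XTS cipher mode,
+ * SCRATCH_PAD3_SM_MASK the security mode (SMF/SMA/SMB), and on error the
+ * status code is taken from (value & SCRATCH_PAD3_ERR_CODE) >> 16. (Field
+ * roles are inferred from this function itself.)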
+ */ +static int +pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) +{ + u32 scratch3_value; + int ret; + + /* Read encryption status from SCRATCH PAD 3 */ + scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_READY) { + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_ha->encrypt_info.status = 0; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X." + "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) == + SCRATCH_PAD3_ENC_DISABLED) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n", + scratch3_value)); + pm8001_ha->encrypt_info.status = 0xFFFFFFFF; + pm8001_ha->encrypt_info.cipher_mode = 0; + pm8001_ha->encrypt_info.sec_mode = 0; + return 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_DIS_ERR) { + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_ENA_ERR) { + + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } + return ret; +} + +/** + * pm80xx_encrypt_update - update flash with encryption informtion + * @pm8001_ha: our hba card information. 
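+ *
+ * The new_curidx_ksop word built below appears, going by its name, to pack
+ * the new KEK index, the current KEK index, the store-to-NVRAM flag and the
+ * sub-operation code into successive bytes, high to low; all three upper
+ * fields are set to 1 here, with KEK_MGMT_SUBOP_KEYCARDUPDATE in the low
+ * byte. (The byte ordering is an assumption based on the field name and the
+ * in-line comment, not on a separate spec.)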
+ */ +static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) +{ + struct kek_mgmt_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_KEK_MANAGEMENT; + + memset(&payload, 0, sizeof(struct kek_mgmt_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + /* Currently only one key is used. New KEK index is 1. + * Current KEK index is 1. Store KEK to NVRAM is 1. + */ + payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm8001_chip_init - the main init function that initialize whole PM8001 chip. + * @pm8001_ha: our hba card information + */ +static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + int ret; + u8 i = 0; + + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + + /* Initialize pci space address eg: mpi offset */ + init_pci_device_addresses(pm8001_ha); + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + read_phy_attr_table(pm8001_ha); + + /* update main config table ,inbound table and outbound table */ + update_main_config_table(pm8001_ha); + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MPI initialize successful!\n")); + } else + return -EBUSY; + + /* send SAS protocol timer configuration page to FW */ + ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha); + + /* Check for encryption */ + if (pm8001_ha->chip->encrypt) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Checking for encryption\n")); + ret = pm80xx_get_encrypt_info(pm8001_ha); + if (ret == -1) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Encryption error !!\n")); + if (pm8001_ha->encrypt_info.status == 0x81) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled with error." 
+ "Saving encryption key to flash\n")); + pm80xx_encrypt_update(pm8001_ha); + } + } + } + return 0; +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:IBDB value/=%x\n", value)); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK)); + return -1; + } + + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all + * the FW register status to the originated status. + * @pm8001_ha: our hba card information + */ + +static int +pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regval; + u32 bootloader_state; + + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MPI state is not ready\n")); + return -1; + } + + /* checked for reset register normal state; 0x0 */ + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register before write : 0x%x\n", regval)); + + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); + mdelay(500); + + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register after write 0x%x\n", regval)); + + if ((regval & SPCv_SOFT_RESET_READ_MASK) == + SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset successful [regval: 0x%x]\n", + regval)); + } else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset failed [regval: 0x%x]\n", + regval)); + + /* check bootloader is successfully executed or in HDA mode */ + bootloader_state = + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_BOOTSTATE_MASK; + + if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode SEEPROM\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode Bootstrap Pin\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode soft reset\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state-HDA mode critical error\n")); + } + return -EBUSY; + } + + /* check the firmware status after reset */ + if (-1 == 
check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPCv soft reset Complete\n")); + return 0; +} + +static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset start\n")); + + /* do SPCv chip reset. */ + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPC soft reset Complete\n")); + + /* Check this ..whether delay is required or no */ + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset finished\n")); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + +/** + * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + mask = (u32)(1 << vec); + + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_enable(pm8001_ha); + +} + +/** + * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + if (vec == 0xFF) + mask = 0xFFFFFFFF; + else + mask = (u32)(1 << vec); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_disable(pm8001_ha); +} + +static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 
ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + +/** + * mpi_ssp_completion- process the event that FW response to the SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. + * + * When FW has completed a ssp request for example a IO request, after it has + * filled the SG data with the data, it will trigger this event represent + * that he has finished the job,please check the coresponding buffer. + * So we will tell the caller who maybe waiting the result to tell upper layer + * that the task has been finished. 
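+ * (In short: the handler looks up the ccb and sas_task by the IOMB tag,
+ * maps the firmware completion status onto the task_status structure,
+ * frees the ccb, and calls task_done() unless the upper layer has already
+ * aborted the task.)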
+ */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SUCCESS ,param = 0x%x\n", + param)); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n", + param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + 
pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_TM_TAG_NOT_FOUND\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + } + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("scsi_status = 0x%x\n ", + psspPayload->ssp_resp_iu.status)); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", event)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");) + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + 
ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + return; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with event 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + tag = le32_to_cpu(psataPayload->tag); + + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psataPayload->param); + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + if (!ts) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ts null\n")); + return; + } + + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm80xx_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SAS_PROTO_RESPONSE len = %d\n", + param)); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == PCI_DMA_FROMDEVICE) { + len = sizeof(struct pio_setup_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("PIO read len = %d\n", len)); + } else if (t->ata_task.use_ncq) { + len = sizeof(struct set_dev_bits_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("FPDMA len = %d\n", len)); + } else { + len = sizeof(struct dev_to_host_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("other len = %d\n", len)); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + 
resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("response to large\n")); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW param = %d\n", param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*in order to force CPU ordering*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + 
"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else if (t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } else if (!t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + unsigned long flags; + + ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. 
returning\n")); + return; + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm80xx_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + if (unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = 
SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + break; + case IO_XFER_PIO_SETUP_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_DMA_ACTIVATE_TIMEOUT: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else if (t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } else if (!t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 param, i; + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + char *pdma_respaddr = NULL; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psmpPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("smp IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + switch (status) { + + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + if (pm8001_dev) + pm8001_dev->running_req--; + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("DIRECT RESPONSE Length:%d\n", + param)); + pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64 + ((u64)sg_dma_address + (&t->smp_task.smp_resp)))); + for (i = 0; i < param; i++) { + *(pdma_respaddr+i) = psmpPayload->_r_a[i]; + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "SMP Byte%d DMA data 0x%x psmp 0x%x\n", + i, *(pdma_respaddr+i), + psmpPayload->_r_a[i])); + } + } + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case 
IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk(\ + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_RX_FRAME\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x" + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/** + * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW. + * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + struct inbound_queue_table *circularQ; + + memset((u8 *)&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[Qnum]; + payload.tag = cpu_to_le32(1); + payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0xFF) << 24) | (port_id & 0xFF)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); +} + +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +/** + * hw_event_sas_phy_up -FW tells me a SAS phy up event. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + port->port_state = portstate; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "portid:%d; phyid:%d; linkrate:%d; " + "portstate:%x; devicetype:%x\n", + port_id, phy_id, link_rate, portstate, deviceType)); + + switch (deviceType) { + case SAS_PHY_UNUSED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("device type no device.\n")); + break; + case SAS_END_DEVICE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("fanout expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unknown device type(%x)\n", deviceType)); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200);/*delay a moment to wait disk to spinup*/ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up -FW tells me a SATA phy up event. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "port id %d, phy id %d link_rate %d portstate 0x%x\n", + port_id, phy_id, link_rate, portstate)); + + port->port_state = portstate; + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->sas_phy.oob_mode = SATA_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), + sizeof(struct dev_to_host_fis)); + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + phy->identify.device_type = SAS_SATA_DEV; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_phy_down -we should notify the libsas the phy is down. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + port->port_state = portstate; + phy->phy_type = 0; + phy->identify.device_type = 0; + phy->phy_attached = 0; + memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE); + switch (portstate) { + case PORT_VALID: + break; + case PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" PortInvalid portID %d\n", port_id)); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + case PORT_IN_RESET: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Port In Reset portID %d\n", port_id)); + break; + case PORT_NOT_ESTABLISHED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + default: + port->port_attached = 0; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and(default) = 0x%x\n", + portstate)); + break; + + } +} + +static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_start_resp *pPayload = + (struct phy_start_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phy_id = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n", + status, phy_id)); + if (status == 0) { + phy->phy_state = 1; + if (pm8001_ha->flags == PM8001F_RUN_TIME) + complete(phy->enable_completion); + } + return 0; + +} + +/** + * mpi_thermal_hw_event -The hw event has come. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct thermal_hw_event *pPayload = + (struct thermal_hw_event *)(piomb + 4); + + u32 thermal_event = le32_to_cpu(pPayload->thermal_event); + u32 rht_lht = le32_to_cpu(pPayload->rht_lht); + + if (thermal_event & 0x40) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Local high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured local high temperature %d\n", + ((rht_lht & 0xFF00) >> 8))); + } + if (thermal_event & 0x10) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Remote high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured remote high temperature %d\n", + ((rht_lht & 0xFF000000) >> 24))); + } + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
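/*
 * Illustrative sketch only, not part of this patch: the SPCv hardware-event
 * IOMB packs its fields into two little-endian words, and the phy-up/phy-down
 * handlers above and mpi_hw_event below unpack them with masks and shifts:
 *   lr_status_evt_portid : port id [7:0], event type [23:8],
 *                          status [27:24], link rate [31:28]
 *   phyid_npip_portstate : port state [3:0], phy id [23:16]
 * A stand-alone decoder for those two words (struct and field names are mine):
 */
#include <stdint.h>
#include <stdio.h>

struct hw_event_fields {
	uint8_t  port_id, phy_id, link_rate, status, portstate;
	uint16_t event_type;
};

static struct hw_event_fields decode_hw_event(uint32_t lr_status_evt_portid,
					      uint32_t phyid_npip_portstate)
{
	struct hw_event_fields f = {
		.port_id    = lr_status_evt_portid & 0xFF,
		.event_type = (lr_status_evt_portid >> 8) & 0xFFFF,
		.status     = (lr_status_evt_portid >> 24) & 0x0F,
		.link_rate  = (lr_status_evt_portid >> 28) & 0x0F,
		.portstate  = phyid_npip_portstate & 0x0F,
		.phy_id     = (phyid_npip_portstate >> 16) & 0xFF,
	};
	return f;
}

int main(void)
{
	/* arbitrary example words, not real event codes */
	struct hw_event_fields f = decode_hw_event(0x90012201, 0x00030001);

	printf("port %u phy %u event 0x%x rate %u\n", (unsigned)f.port_id,
	       (unsigned)f.phy_id, (unsigned)f.event_type, (unsigned)f.link_rate);
	return 0;
}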
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u16 eventType = + (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_status_evt_portid & 0x0F000000) >> 24); + + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status)); + + switch (eventType) { + + case HW_EVENT_SAS_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_START_STATUS\n")); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_PHY_UP\n")); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_SPINUP_HOLD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case HW_EVENT_PHY_DOWN: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_DOWN\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_INVALID\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_PHY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_ERROR\n")); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + case HW_EVENT_BROADCAST_EXP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 
0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_MALFUNCTION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_MALFUNCTION\n")); + break; + case HW_EVENT_BROADCAST_SES: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_SES\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_PORT_RECOVERY_TIMER_TMO, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVER: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVER\n")); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Unknown event type 0x%x\n", eventType)); + break; + } + return 0; +} + +/** + * mpi_phy_stop_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message 
buffer + */ +static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_stop_resp *pPayload = + (struct phy_stop_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phyid = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("phy:0x%x status:0x%x\n", + phyid, status)); + if (status == 0) + phy->phy_state = 0; + return 0; +} + +/** + * mpi_set_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_ctrl_cfg_resp *pPayload = + (struct set_ctrl_cfg_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", + status, err_qlfr_pgcd)); + + return 0; +} + +/** + * mpi_get_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_get_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_flash_op_ext_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_set_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_kek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); + + u32 status = le32_to_cpu(pPayload->status); + u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); + u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "KEK MGMT RESP. 
Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", + status, kidx_new_curr_ksop, err_qlfr)); + + return 0; +} + +/** + * mpi_dek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * ssp_coalesced_comp_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); + + switch (opc) { + case OPC_OUB_ECHO: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n")); + break; + case OPC_OUB_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_HW_EVENT\n")); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_THERM_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_THERMAL_EVENT\n")); + mpi_thermal_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_COMP\n")); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_COMP\n")); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_REGIST\n")); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unresgister the deviece\n")); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n")); + break; + case OPC_OUB_SATA_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_COMP\n")); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_EVENT\n")); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_EVENT\n")); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); + /*This is for target*/ + break; + case OPC_OUB_FW_FLASH_UPDATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); + break; + case OPC_OUB_GPIO_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_EVENT\n")); + break; + case OPC_OUB_GENERAL_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); + pm8001_mpi_general_event(pm8001_ha, piomb); 
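/*
 * Illustrative sketch only, not part of this patch: every outbound IOMB that
 * reaches this switch starts with a little-endian 32-bit header whose low 12
 * bits carry the OPC_OUB_* opcode; process_one_iomb() masks that out and
 * dispatches on it. A stand-alone model of that header parse (the buffer
 * layout and helper names below are assumptions, not the driver's API):
 */
#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit little-endian word from a raw message buffer. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint32_t iomb_opcode(const uint8_t *iomb)
{
	return get_le32(iomb) & 0xFFF;  /* low 12 bits select the handler */
}

int main(void)
{
	uint8_t iomb[64] = { 0x01, 0x09, 0x00, 0x00 }; /* arbitrary header */

	printf("opc = 0x%03x\n", (unsigned)iomb_opcode(iomb)); /* -> 0x901 */
	return 0;
}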
+ break; + case OPC_OUB_SSP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); + break; + case OPC_OUB_GET_TIME_STAMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); + break; + case OPC_OUB_PORT_CONTROL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_PORT_CONTROL\n")); + break; + case OPC_OUB_SMP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); + break; + case OPC_OUB_SET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); + break; + case OPC_OUB_SET_DEV_INFO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); + break; + /* spcv specifc commands */ + case OPC_OUB_PHY_START_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_START_RESP opcode:%x\n", opc)); + mpi_phy_start_resp(pm8001_ha, piomb); + break; + case OPC_OUB_PHY_STOP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc)); + mpi_phy_stop_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_set_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_get_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc)); + mpi_get_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_FLASH_OP_EXT: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc)); + mpi_flash_op_ext_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc)); + mpi_set_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_KEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc)); + mpi_kek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc)); + 
mpi_dek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COALESCED_COMP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc)); + ssp_coalesced_comp_resp(pm8001_ha, piomb); + break; + default: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Unknown outbound Queue IOMB OPC = 0x%x\n", opc)); + break; + } +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 uninitialized_var(bc); + u32 ret = MPI_IO_STATUS_FAIL; + unsigned long flags; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + do { + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return ret; +} + +/* PCI_DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */ + [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */ + [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ + [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ +}; + +static void build_smp_cmd(u32 deviceID, __le32 hTag, + struct smp_req *psmp_cmd, int mode, int length) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + if (mode == SMP_DIRECT) { + length = length - 4; /* subtract crc */ + psmp_cmd->len_ip_ir = cpu_to_le32(length << 16); + } else { + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); + } +} + +/** + * pm8001_chip_smp_req - send a SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
+ */ +static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + struct inbound_queue_table *circularQ; + char *preq_dma_addr = NULL; + __le64 tmp_addr; + u32 i, length; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + + length = sg_req->length; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP Frame Length %d\n", sg_req->length)); + if (!(length - 8)) + pm8001_ha->smp_exp_mode = SMP_DIRECT; + else + pm8001_ha->smp_exp_mode = SMP_INDIRECT; + + /* DIRECT MODE support only in spcv/ve */ + pm8001_ha->smp_exp_mode = SMP_DIRECT; + + tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + preq_dma_addr = (char *)phys_to_virt(tmp_addr); + + /* INDIRECT MODE command settings. Use DMA */ + if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST INDIRECT MODE\n")); + /* for SPCv indirect mode. Place the top 4 bytes of + * SMP Request header here. 
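/*
 * Illustrative sketch only, not part of this patch: in SPCv indirect SMP mode
 * the first 4 header bytes of the SMP request travel inside the IOMB itself
 * (smp_req16[0..3]) while the rest is fetched by DMA, so the code below sizes
 * the request DMA region as length minus 8 (the 4 header bytes already in the
 * IOMB plus 4 bytes of CRC, per the driver's own comments) and the response
 * region as length minus 4 (CRC only). Direct mode embeds the whole frame and
 * only drops the CRC (see build_smp_cmd above). A stand-alone model of that
 * accounting, with names of my own:
 */
#include <stdint.h>
#include <stdio.h>

#define SMP_FRAME_HDR_LEN 4     /* bytes kept inside the IOMB */
#define SMP_CRC_LEN       4     /* trailing CRC excluded from the DMA size */

struct smp_dma_sizes {
	uint32_t req_dma_len;   /* request bytes transferred over DMA */
	uint32_t resp_dma_len;  /* response bytes transferred over DMA */
};

static struct smp_dma_sizes smp_indirect_sizes(uint32_t req_len,
					       uint32_t resp_len)
{
	struct smp_dma_sizes s = {
		.req_dma_len  = req_len - (SMP_FRAME_HDR_LEN + SMP_CRC_LEN),
		.resp_dma_len = resp_len - SMP_CRC_LEN,
	};
	return s;
}

int main(void)
{
	/* e.g. a 44-byte request scatterlist and a 64-byte response buffer */
	struct smp_dma_sizes s = smp_indirect_sizes(44, 64);

	printf("req dma %u, resp dma %u\n",
	       (unsigned)s.req_dma_len, (unsigned)s.resp_dma_len);
	return 0;
}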
*/ + for (i = 0; i < 4; i++) + smp_cmd.smp_req16[i] = *(preq_dma_addr + i); + /* exclude top 4 bytes for SMP req header */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req) - 4); + /* exclude 4 bytes for SMP req header and CRC */ + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len + (&task->smp_task.smp_resp)-4); + } else { /* DIRECT MODE */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32 + ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + } + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST DIRECT MODE\n")); + for (i = 0; i < length; i++) + if (i < 16) { + smp_cmd.smp_req16[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req16[i], + *(preq_dma_addr))); + } else { + smp_cmd.smp_req[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req[i], + *(preq_dma_addr))); + } + } + + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, + &smp_cmd, pm8001_ha->smp_exp_mode, length); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static int check_enc_sas_cmd(struct sas_task *task) +{ + if ((task->ssp_task.cdb[0] == READ_10) + || (task->ssp_task.cdb[0] == WRITE_10) + || (task->ssp_task.cdb[0] == WRITE_VERIFY)) + return 1; + else + return 0; +} + +static int check_enc_sat_cmd(struct sas_task *task) +{ + int ret = 0; + switch (task->ata_task.fis.command) { + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +/** + * pm80xx_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
+ */ +static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + int ret; + u64 phys_addr; + struct inbound_queue_table *circularQ; + static u32 inb; + static u32 outb; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, + * used internally by controller + * 0 for SAS 1.1 and SAS 2.0 compatible TLR + */ + ssp_cmd.dad_dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + if (task->ssp_task.enable_first_burst) + ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SAS command 0x%x\n", + task->ssp_task.cdb[0])); + opc = OPC_INB_SSP_INI_DIF_ENC_IO; + /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/ + ssp_cmd.dad_dir_m_tlr = cpu_to_le32 + ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.enc_addr_low = 0; + ssp_cmd.enc_addr_high = 0; + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + ssp_cmd.key_cmode = 0x6 << 4; + /* set tweak values. 
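/*
 * Illustrative sketch only, not part of this patch: on the encrypted SSP path
 * the XTS tweak is seeded with the starting LBA, and for the READ_10/WRITE_10/
 * WRITE_VERIFY commands that check_enc_sas_cmd() admits, that LBA is the
 * big-endian 32-bit value in CDB bytes 2..5 -- exactly what the twk_val0
 * assignment below reassembles. A stand-alone version of that extraction:
 */
#include <stdint.h>
#include <stdio.h>

/* Pull the 32-bit logical block address out of a 10-byte READ/WRITE CDB. */
static uint32_t lba_from_cdb10(const uint8_t *cdb)
{
	return ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	       ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
}

int main(void)
{
	/* READ(10), opcode 0x28, for LBA 0x12345678, one block */
	uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0, 1, 0 };

	printf("tweak/lba = 0x%08x\n", (unsigned)lba_from_cdb10(cdb));
	return 0;
}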
Should be the start lba */ + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) | + (task->ssp_task.cdb[3] << 16) | + (task->ssp_task.cdb[4] << 8) | + (task->ssp_task.cdb[5])); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SAS command 0x%x inb q %x\n", + task->ssp_task.cdb[0], inb)); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + } + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + + return ret; +} + +static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + int ret; + static u32 inb; + static u32 outb; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr; + u32 ATAP = 0x0; + u32 dir; + struct inbound_queue_table *circularQ; + unsigned long flags; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + if (task->data_dir == PCI_DMA_NONE) { + ATAP = 0x04; /* no data*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n")); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n")); + } else { + ATAP = 0x05; /* PIO*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); + } + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + ATAP = 0x07; /* FPDMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", + sata_cmd.sata_fis.command)); + opc = OPC_INB_SATA_DIF_ENC_IO; + + /* set encryption bit */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16)| + ((ATAP & 0x3f) << 10) | 0x20 | dir); + /* dad (bit 0-1) is 0 */ + /* 
fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.enc_addr_low = lower_32_bits(phys_addr); + sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.enc_addr_low = lower_32_bits(dma_addr); + sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.enc_addr_low = 0; + sata_cmd.enc_addr_high = 0; + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + sata_cmd.key_index_mode = 0x6 << 4; + /* set tweak values. Should be the start lba */ + sata_cmd.twk_val0 = + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | + (sata_cmd.sata_fis.lbah << 16) | + (sata_cmd.sata_fis.lbam << 8) | + (sata_cmd.sata_fis.lbal)); + sata_cmd.twk_val1 = + cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | + (sata_cmd.sata_fis.lbam_exp)); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SATA command 0x%x inb %x\n", + sata_cmd.sata_fis.command, inb)); + /* dad (bit 0-1) is 0 */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16) | + ((ATAP & 0x3f) << 10) | dir); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + /* scsi cdb */ + sata_cmd.atapi_scsi_cdb[0] = + cpu_to_le32(((task->ata_task.atapi_packet[0]) | + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); + sata_cmd.atapi_scsi_cdb[1] = + cpu_to_le32(((task->ata_task.atapi_packet[4]) | + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); + sata_cmd.atapi_scsi_cdb[2] = + cpu_to_le32(((task->ata_task.atapi_packet[8]) | + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); + sata_cmd.atapi_scsi_cdb[3] = + cpu_to_le32(((task->ata_task.atapi_packet[12]) | + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); + } + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + 
spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + return 0; + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, + &sata_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + return ret; +} + +/** + * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to start up. + */ +static int +pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PHY START REQ for phy_id %d\n", phy_id)); + /* + ** [0:7] PHY Identifier + ** [8:11] link rate 1.5G, 3G, 6G + ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode + ** [14] 0b disable spin up hold; 1b enable spin up hold + ** [15] ob no change in current PHY analig setup 1b enable using SPAST + */ + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | phy_id); + /* SSC Disable and SAS Analog ST configuration */ + /** + payload.ase_sh_lm_slr_phyid = + cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | + phy_id); + Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need + **/ + + payload.sas_identify.dev_type = SAS_END_DEVICE; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to start up. 
+ */ +static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * see comments on pm8001_mpi_reg_resp. + */ +static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + struct inbound_queue_table *circularQ; + u32 linkrate, phy_id; + int rc, tag = 0xdeadbeef; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = tag; + payload.tag = cpu_to_le32(tag); + + if (flag == 1) { + stp_sspsmp_sata = 0x02; /*direct attached sata */ + } else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + + opc = OPC_INB_REG_DEV; + + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + + payload.phyid_portid = + cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) | + ((phy_id & 0xFF) << 8)); + + payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) << 24) | + ((stp_sspsmp_sata & 0x03) << 28)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to operate + * @phy_op: + */ +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + memset(&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(1); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return ret; +} + +static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; +#ifdef PM8001_USE_MSIX + return 1; +#endif + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; + +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @irq: irq number. + * @stat: stat. 
+ */ +static irqreturn_t +pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm80xx_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm80xx_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +const struct pm8001_dispatch pm8001_80xx_dispatch = { + .name = "pmc80xx", + .chip_init = pm80xx_chip_init, + .chip_soft_rst = pm80xx_chip_soft_rst, + .chip_rst = pm80xx_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm80xx_chip_isr, + .is_our_interupt = pm80xx_chip_is_our_interupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm80xx_chip_interrupt_enable, + .interrupt_disable = pm80xx_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm80xx_chip_smp_req, + .ssp_io_req = pm80xx_chip_ssp_io_req, + .sata_req = pm80xx_chip_sata_req, + .phy_start_req = pm80xx_chip_phy_start_req, + .phy_stop_req = pm80xx_chip_phy_stop_req, + .reg_dev_req = pm80xx_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm80xx_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, +}; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h new file mode 100644 index 000000000000..2b760ba75d7b --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -0,0 +1,1523 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include <linux/types.h> +#include <scsi/libsas.h> + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +/* 0x8 RESV IN SPCv */ +#define OPC_INB_RSVD 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +/* 0xC, 0xD, 0xE removed in SPCv */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* 0x13 SMP_RESPONSE is removed in SPCv */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +/* 0x16 RESV IN SPCv */ +#define OPC_INB_RSVD1 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +/* 0x1A RESV IN SPCv */ +#define OPC_INB_RSVD2 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +/* 0x25 RESV IN SPCv */ +#define OPC_INB_RSVD3 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +/* 0x2D RESV IN SPCv */ +#define OPC_INB_RSVD4 45 /* 0x02D */ +#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */ +#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */ +#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */ +#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */ +#define OPC_INB_REG_DEV 50 /* 0x032 */ +#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */ +#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */ +#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */ +#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */ +#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */ +#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */ +#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */ +#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */ +#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_RSVD 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_RSVD1 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_RSVD2 16 /* 0x010 */ +/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_RSVD3 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ 
+#define OPC_OUB_RSVD4 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_RSVD5 41 /* 0x029 */ +#define OPC_OUB_HW_EVENT 1792 /* 0x700 */ +#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */ +#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */ +#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */ +#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */ +#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */ +#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */ +#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */ +/* spcv specific commands */ +#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */ +#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */ +#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */ +#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */ +#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */ +#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */ +#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */ +#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */ +#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */ +#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */ + +/* for phy start*/ +#define SSC_DISABLE_15 (0x01 << 16) +#define SSC_DISABLE_30 (0x02 << 16) +#define SSC_DISABLE_60 (0x04 << 16) +#define SAS_ASE (0x01 << 15) +#define SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x06 << 8) + +/* Thermal related */ +#define THERMAL_ENABLE 0x1 +#define THERMAL_LOG_ENABLE 0x1 +#define THERMAL_OP_CODE 0x6 +#define LTEMPHIL 70 +#define RTEMPHIL 100 + +/* Encryption info */ +#define SCRATCH_PAD3_ENC_DISABLED 0x00000000 +#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001 +#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002 +#define SCRATCH_PAD3_ENC_READY 0x00000003 +#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY + +#define SCRATCH_PAD3_XTS_ENABLED (1 << 14) +#define SCRATCH_PAD3_SMA_ENABLED (1 << 4) +#define SCRATCH_PAD3_SMB_ENABLED (1 << 5) +#define SCRATCH_PAD3_SMF_ENABLED 0 +#define SCRATCH_PAD3_SM_MASK 0x000000F0 +#define SCRATCH_PAD3_ERR_CODE 0x00FF0000 + +#define SEC_MODE_SMF 0x0 +#define SEC_MODE_SMA 0x100 +#define SEC_MODE_SMB 0x200 +#define CIPHER_MODE_ECB 0x00000001 +#define CIPHER_MODE_XTS 0x00000002 +#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4 + +/* SAS protocol timer configuration page */ +#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04 +#define STP_MCT_TMO 32 +#define SSP_MCT_TMO 32 +#define SAS_MAX_OPEN_TIME 5 +#define SMP_MAX_CONN_TIMER 0xFF +#define STP_FRM_TIMER 0 +#define STP_IDLE_TIME 5 /* 5 us; controller default */ +#define SAS_MFD 0 +#define SAS_OPNRJT_RTRY_INTVL 2 +#define SAS_DOPNRJT_RTRY_TMO 128 +#define SAS_COPNRJT_RTRY_TMO 128 + +/* + Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. 
+ Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 + is DOPNRJT_RTRY_TMO +*/ +#define SAS_DOPNRJT_RTRY_THR 23438 +#define SAS_COPNRJT_RTRY_THR 23438 +#define SAS_MAX_AIP 0x200000 +#define IT_NEXUS_TIMEOUT 0x7D0 +#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) + +struct mpi_msg_hdr { + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (128 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; /* 28 Bytes */ + __le32 spasti; + u32 reserved[21]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (128 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. 
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +/* updated outbound struct for spcv */ + +struct hw_event_resp { + __le32 lr_status_evt_portid; + __le32 evt_param; + __le32 phyid_npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure for thermal event notification + */ + +struct thermal_hw_event { + __le32 thermal_event; + __le32 rht_lht; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_mcn_ir_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ + struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ +struct hw_event_ack_req { + __le32 tag; + __le32 phyid_sea_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY_START Response Command + * indicates the completion of PHY_START command (64 bytes) + */ +struct phy_start_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of 
PHY_STOP Response Command + * indicates the completion of PHY_STOP command (64 bytes) + */ +struct phy_stop_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVNET response + * use to indicate a SATA Completion (64 bytes) + */ +struct sata_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved; + __le32 event_param0; + __le32 event_param1; + __le32 sata_addr_h32; + __le32 sata_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVNET esponse + * use to indicate a SSP Completion (64 bytes) + */ +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + __le32 ssp_tag; + __le32 event_param0; + __le32 event_param1; + __le32 sas_addr_h32; + __le32 sas_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; + __le32 rsvd[16]; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u8 _r_a[252]; +} __attribute__((packed, aligned(4))); + +/* + *brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[27]; +} __attribute__((packed, aligned(4))); + +/* These flags used for SSP SMP & SATA Abort */ +#define ABORT_MASK 0x3 +#define ABORT_SINGLE 0x0 +#define ABORT_ALL 0x1 + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response ( 64 bytes) + */ 
+struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 ncqtag_atap_dir_m_dad; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; /* dword 11. rsvd for normal I/O. */ + /* EPLE Descl for enc I/O */ + u32 addr_low; /* dword 12. rsvd for enc I/O */ + u32 addr_high; /* dword 13. reserved for enc I/O */ + __le32 len; /* dword 14: length for normal I/O. */ + /* EPLE Desch for enc I/O */ + __le32 esgl; /* dword 15. rsvd for enc I/O */ + __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */ + /* The below fields are reserved for normal I/O */ + __le32 key_index_mode; /* dword 20 */ + __le32 sector_cnt_enss;/* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28. Encryption SGL address high */ + __le32 enc_addr_high; /* dword 29. Encryption SGL address low */ + __le32 enc_len; /* dword 30. Encryption length */ + __le32 enc_esgl; /* dword 31. Encryption esgl bit */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dad_dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; /* dword 12: sgl low for normal I/O. 
*/ + /* epl_descl for encryption I/O */ + __le32 addr_high; /* dword 13: sgl hi for normal I/O */ + /* dpl_descl for encryption I/O */ + __le32 len; /* dword 14: len for normal I/O. */ + /* edpl_desch for encryption I/O */ + __le32 esgl; /* dword 15: ESGL bit for normal I/O. */ + /* user defined tag mask for enc I/O */ + /* The below fields are reserved for normal I/O */ + u8 udt[12]; /* dword 16-18 */ + __le32 sectcnt_ios; /* dword 19 */ + __le32 key_cmode; /* dword 20 */ + __le32 ks_enss; /* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */ + __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */ + __le32 enc_len; /* dword 30: Encryption length */ + __le32 enc_esgl; /* dword 31: ESGL bit for encryption */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND + * use to initiate SSP I/O operation with optional DIF/ENC + */ +struct ssp_dif_enc_io_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dirMTlr; + __le32 sspiu0; + __le32 sspiu1; + __le32 sspiu2; + __le32 sspiu3; + __le32 sspiu4; + __le32 sspiu5; + __le32 sspiu6; + __le32 epl_des; + __le32 dpl_desl_ndplr; + __le32 dpl_desh; + __le32 uum_uuv_bss_difbits; + u8 udt[12]; + __le32 sectcnt_ios; + __le32 key_cmode; + __le32 ks_enss; + __le32 keytagl; + __le32 keytagh; + __le32 twk_val0; + __le32 twk_val1; + __le32 twk_val2; + __le32 twk_val3; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ + struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_req { + __le32 tag; + __le32 cfg_pg[14]; + u32 reserved[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET CONTROLLER CONFIG COMMAND + * use to get controller configuration page + */ +struct get_ctrl_cfg_req { + __le32 tag; + __le32 pgcd; + __le32 int_vec; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for KEK_MANAGEMENT COMMAND + * 
use for KEK management + */ +struct kek_mgmt_req { + __le32 tag; + __le32 new_curidx_ksop; + u32 reserved; + __le32 kblob[12]; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for DEK_MANAGEMENT COMMAND + * use for DEK management + */ +struct dek_mgmt_req { + __le32 tag; + __le32 kidx_dsop; + __le32 dekidx; + __le32 addr_l; + __le32 addr_h; + __le32 nent; + __le32 dbf_tblsize; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET PHY PROFILE COMMAND + * use to retrive phy specific information + */ +struct set_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET PHY PROFILE COMMAND + * use to retrive phy specific information + */ +struct get_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + __le32 profile[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for EXT FLASH PARTITION + * use to manage ext flash partition + */ +struct ext_flash_partition_req { + __le32 tag; + __le32 cmd; + __le32 offset; + __le32 len; + u32 reserved[7]; + __le32 addr_low; + __le32 addr_high; + __le32 len1; + __le32 ext; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - begins */ +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr_pgcd; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +struct get_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr; + __le32 confg_page[12]; +} __attribute__((packed, aligned(4))); + +struct kek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kidx_new_curr_ksop; + __le32 err_qlfr; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +struct dek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kekidx_tbls_dsop; + __le32 dekidx; + __le32 err_qlfr; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct 
get_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct flash_op_ext_resp { + __le32 tag; + __le32 cmd; + __le32 status; + __le32 epart_size; + __le32 epart_sect_size; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct set_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct ssp_coalesced_comp_resp { + __le32 coal_cnt; + __le32 tag0; + __le32 ssp_tag0; + __le32 tag1; + __le32 ssp_tag1; + __le32 add_tag_ssp_tag[10]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - ends */ + +/* brief data structure for SAS protocol timer configuration page. + * + */ +struct SASProtocolTimerConfig { + __le32 pageCode; /* 0 */ + __le32 MST_MSI; /* 1 */ + __le32 STP_SSP_MCT_TMO; /* 2 */ + __le32 STP_FRM_TMO; /* 3 */ + __le32 STP_IDLE_TMO; /* 4 */ + __le32 OPNRJT_RTRY_INTVL; /* 5 */ + __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */ + __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */ + __le32 MAX_AIP; /* 8 */ +} __attribute__((packed, aligned(4))); + +typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_3RD_PARTY_RESET 0x07 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A + +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +/* This 
error code 0x18 is not used on SPCv */ +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 + +/* This error code 0x22 is not used on SPCv */ +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +/* This error code 0x25 is not used on SPCv */ +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 + +/* The following error code 0x31 and 0x32 are not using (obsolete) */ +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/********** additional response event values *****************/ + +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43 +#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48 +#define IO_DS_INVALID 0x49 +/* WARNING: the value is not contiguous from here */ +#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52 +#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53 +#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54 +#define MPI_IO_RQE_BUSY_FULL 0x55 +#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56 +#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57 +#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58 + +#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004 +#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024 + +#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040 +/* + * An encryption IO request failed due to DEK Key Tag mismatch. + * The key tag supplied in the encryption IOMB does not match with + * the Key Tag in the referenced DEK Entry. + */ +#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041 +#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042 +/* + * An encryption I/O request failed because the initial value (IV) + * in the unwrapped DEK blob didn't match the IV used to unwrap it. + */ +#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. */ +#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. 
*/ +#define IO_XFR_ERROR_INTERNAL_RAM 0x2045 +/* + * An encryption I/O request failed + * because the DEK index specified in the I/O was outside the bounds of + * the total number of entries in the host DEK table. + */ +#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS0x2046 + +/* define DIF IO response error status code */ +#define IO_XFR_ERROR_DIF_MISMATCH 0x3000 +#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001 +#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002 +#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003 + +/* define operator management response status and error qualifier code */ +#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060 +#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063 +#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064 +#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022 +#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023 +/***************** additional response event values ***************/ + +/* WARNING: This error code must always be the last number. + * If you add error code, modify this code also + * It is used as an index + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x2023 + +/* MSGU CONFIGURATION TABLE*/ + +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01 +#define SPCv_MSGU_CFG_TABLE_RESET 0x02 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08 +#define MSGU_IBDB_SET 0x00 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20 + +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x20 +#define MSGU_ODCR 0x28 + +#define MSGU_ODMR 0x30 +#define MSGU_ODMR_U 0x34 +#define MSGU_ODMR_CLR 0x38 +#define MSGU_ODMR_CLR_U 0x3C +#define MSGU_OD_RSVD 0x40 + +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0 /* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector*/ +/* MSIX Interupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \ + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD_RAAE_READY 0x3 +#define SCRATCH_PAD_ILA_READY 0xC +#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 +#define SCRATCH_PAD_IOP0_READY 0xC00 +#define SCRATCH_PAD_IOP1_READY 0x3000 + +/* boot loader state */ +#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ +#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */ +#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */ +#define 
SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */ + + /* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC/* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */ + +/* 0x28 - 0x4C - RSVD */ +#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */ +#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */ +#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */ +#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */ +#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */ +#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */ + +#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */ +#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ +#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ +#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +/* 0x14 - 0x34 - RSVD */ +#define GST_GPIO_INPUT_VAL 0x38 +/* 0x3c - 0x40 - RSVD */ +#define GST_RERRINFO_OFFSET0 0x44 +#define GST_RERRINFO_OFFSET1 0x48 +#define GST_RERRINFO_OFFSET2 0x4c +#define GST_RERRINFO_OFFSET3 0x50 +#define GST_RERRINFO_OFFSET4 0x54 +#define GST_RERRINFO_OFFSET5 0x58 +#define GST_RERRINFO_OFFSET6 0x5c +#define GST_RERRINFO_OFFSET7 0x60 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +/* Per SAS PHY Attributes */ + +#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */ +#define PSPA_OB_HW_EVENT_PID0_OFFSET 
0x04 /* DWORD V+1 */ +#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */ +#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */ +#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */ +#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */ +#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */ +#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */ +#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */ +#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */ +#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */ +#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */ +#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */ +#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */ +#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */ +#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */ +#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */ +#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */ +#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */ +#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */ +#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */ +#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */ +#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */ +#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */ +#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */ +#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */ +#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */ +#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */ +#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */ +#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */ +#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */ +#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */ +/* end PSPA */ + +/* inbound queue configuration offset - byte offset */ +#define IB_PROPERITY_OFFSET 0x00 +#define IB_BASE_ADDR_HI_OFFSET 0x04 +#define IB_BASE_ADDR_LO_OFFSET 0x08 +#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C +#define IB_CI_BASE_ADDR_LO_OFFSET 0x10 +#define IB_PIPCI_BAR 0x14 +#define IB_PIPCI_BAR_OFFSET 0x18 +#define IB_RESERVED_OFFSET 0x1C + +/* outbound queue configuration offset - byte offset */ +#define OB_PROPERITY_OFFSET 0x00 +#define OB_BASE_ADDR_HI_OFFSET 0x04 +#define OB_BASE_ADDR_LO_OFFSET 0x08 +#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C +#define OB_PI_BASE_ADDR_LO_OFFSET 0x10 +#define OB_CIPCI_BAR 0x14 +#define OB_CIPCI_BAR_OFFSET 0x18 +#define OB_INTERRUPT_COALES_OFFSET 0x1C +#define OB_DYNAMIC_COALES_OFFSET 0x20 +#define OB_PROPERTY_INT_ENABLE 0x40000000 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C + +/* SPCV soft reset */ +#define SPC_REG_SOFT_RESET 0x00001000 +#define SPCv_NORMAL_RESET_VALUE 0x1 + +#define SPCv_SOFT_RESET_READ_MASK 0xC0 +#define SPCv_SOFT_RESET_NO_RESET 0x0 +#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40 +#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80 +#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0 + +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit definition for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define 
SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define HDA_GSM_CMD_OFFSET_BITS 0x42C0 +#define HDA_GSM_RSP_OFFSET_BITS 0x42E0 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#endif diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 317a7fdc3b82..23d607218ae8 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -24,7 +24,9 @@ config SCSI_QLA_FC Firmware images can be retrieved from: - ftp://ftp.qlogic.com/outgoing/linux/firmware/ + http://ldriver.qlogic.com/firmware/ + + They are also included in the linux-firmware tree as well. 
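
[Editorial sketch] The SCRATCH_PAD2_* state values and SCRATCH_PAD_STATE_MASK introduced in the pm80xx_hwi.h hunk above describe a small firmware state machine (power-on, soft reset, error, ready). The following is only a minimal illustration of how a driver might poll that state; the helper name, the MSGU scratch-pad register offset, the includes, and the retry budget are assumptions for the sketch and are not taken from the patch.

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>
/* SCRATCH_PAD2_* and SCRATCH_PAD_STATE_MASK come from pm80xx_hwi.h above. */

#define MSGU_SCRATCH_PAD_2	0x44	/* offset assumed for illustration only */

/* Hypothetical helper: wait for the firmware to reach the ready state. */
static int pm80xx_wait_fw_ready(void __iomem *msgu_base)
{
	int retry = 1000;

	do {
		u32 sp2 = readl(msgu_base + MSGU_SCRATCH_PAD_2);

		if ((sp2 & SCRATCH_PAD_STATE_MASK) == SCRATCH_PAD2_RDY)
			return 0;		/* ready state reached */
		if ((sp2 & SCRATCH_PAD_STATE_MASK) == SCRATCH_PAD2_ERR)
			return -EIO;		/* firmware reported an error */
		udelay(100);
	} while (--retry);

	return -ETIMEDOUT;
}
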
config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 729b74389f83..937fed8cb038 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_WRITE_DATA); + lcmd_pkt->cntrl_flags = TMF_WRITE_DATA; vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_READ_DATA); + lcmd_pkt->cntrl_flags = TMF_READ_DATA; vha->qla_stats.input_bytes += scsi_bufflen(cmd); } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5307bf86d5e0..ad72c1d85111 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) qla2x00_rel_sp(sp->fcport->vha, sp); } -void +static void qla2x00_sp_compl(void *data, void *ptr, int res) { struct qla_hw_data *ha = (struct qla_hw_data *)data; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index d182c96e17ea..7a3870f385f6 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1370,7 +1370,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) dump_stack(); return; } - target_wait_for_sess_cmds(se_sess, 0); + target_wait_for_sess_cmds(se_sess); transport_deregister_session_configfs(sess->se_sess); transport_deregister_session(sess->se_sess); diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 14fec976f634..fad71ed067ec 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb, mrb->mbox_cmd = in_mbox[0]; wmb(); + ha->iocb_cnt += mrb->iocb_cnt; ha->isp_ops->queue_iocb(ha); exit_mbox_iocb: spin_unlock_irqrestore(&ha->hardware_lock, flags); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index a47f99957ba8..4d231c12463e 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); - fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf); - fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf); + fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); + fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); fw_ddb_entry->ipv4_tos = conn->ipv4_tos; fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); - fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn); - fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn); + fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); + fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); fw_ddb_entry->tsid = 
cpu_to_le16(sess->tsid); @@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) * If this is invoked as a result of a userspace call then the entry is marked * as nonpersistent using flash_state field. **/ -int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, - struct dev_db_entry *fw_ddb_entry, - uint16_t *idx, int user) +static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t *idx, int user) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct iscsi_bus_flash_conn *fnode_conn = NULL; @@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, ql4_printk(KERN_ERR, ha, "%s: A non-persistent entry %s found\n", __func__, dev->kobj.name); + put_device(dev); goto exit_ddb_add; } @@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, int parent_type, parent_index = 0xffff; int rc = 0; - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) return -EIO; @@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: - if ((fnode_sess->discovery_parent_idx) >= 0 && - (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)) + if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES) parent_index = fnode_sess->discovery_parent_idx; rc = sprintf(buf, "%u\n", parent_index); @@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, parent_type = ISCSI_DISC_PARENT_ISNS; else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) parent_type = ISCSI_DISC_PARENT_UNKNOWN; - else if (fnode_sess->discovery_parent_type >= 0 && - fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) + else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) parent_type = ISCSI_DISC_PARENT_SENDTGT; else parent_type = ISCSI_DISC_PARENT_UNKNOWN; @@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = -ENOSYS; break; } + + put_device(dev); return rc; } @@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); - struct dev_db_entry *fw_ddb_entry = NULL; struct iscsi_flashnode_param_info *fnode_param; struct nlattr *attr; int rc = QLA_ERROR; uint32_t rem = len; - fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL); - if (!fw_ddb_entry) { - DEBUG2(ql4_printk(KERN_ERR, ha, - "%s: Unable to allocate ddb buffer\n", - __func__)); - return -ENOMEM; - } - nla_for_each_attr(attr, data, len, rem) { fnode_param = nla_data(attr); @@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint16_t *ddb_cookie = NULL; - size_t ddb_size; + size_t ddb_size = 0; void *pddb = NULL; int target_id; int rc = 0; - if (!fnode_sess) { - rc = -EINVAL; - goto exit_ddb_del; - } - if (fnode_sess->is_boot_target) { rc = -EPERM; DEBUG2(ql4_printk(KERN_ERR, ha, @@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) dev_db_start_offset += (fnode_sess->target_id * sizeof(*fw_ddb_entry)); - dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) - - (void *)fw_ddb_entry; + dev_db_start_offset += 
offsetof(struct dev_db_entry, cookie); ddb_size = sizeof(*ddb_cookie); } diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 83e0fec35d56..fe873cf7570d 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.03.00-k8" +#define QLA4XXX_DRIVER_VERSION "5.03.00-k9" diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 5add6f4e7928..0a537a0515ca 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1997,24 +1997,39 @@ out: return ret; } -static unsigned int map_state(sector_t lba, unsigned int *num) +static unsigned long lba_to_map_index(sector_t lba) +{ + if (scsi_debug_unmap_alignment) { + lba += scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; + } + do_div(lba, scsi_debug_unmap_granularity); + + return lba; +} + +static sector_t map_index_to_lba(unsigned long index) { - unsigned int granularity, alignment, mapped; - sector_t block, next, end; + return index * scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; +} - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - block = lba + alignment; - do_div(block, granularity); +static unsigned int map_state(sector_t lba, unsigned int *num) +{ + sector_t end; + unsigned int mapped; + unsigned long index; + unsigned long next; - mapped = test_bit(block, map_storep); + index = lba_to_map_index(lba); + mapped = test_bit(index, map_storep); if (mapped) - next = find_next_zero_bit(map_storep, map_size, block); + next = find_next_zero_bit(map_storep, map_size, index); else - next = find_next_bit(map_storep, map_size, block); + next = find_next_bit(map_storep, map_size, index); - end = next * granularity - scsi_debug_unmap_alignment; + end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); *num = end - lba; return mapped; @@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num) static void map_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (block < map_size) - set_bit(block, map_storep); + if (index < map_size) + set_bit(index, map_storep); - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } static void unmap_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (rem == 0 && lba + granularity < end && block < map_size) { - clear_bit(block, map_storep); - if (scsi_debug_lbprz) + if (lba == map_index_to_lba(index) && + lba + scsi_debug_unmap_granularity <= end && + index < map_size) { + clear_bit(index, map_storep); + if (scsi_debug_lbprz) { memset(fake_storep + - block * scsi_debug_sector_size, 0, - scsi_debug_sector_size); + lba * scsi_debug_sector_size, 0, + scsi_debug_sector_size * + scsi_debug_unmap_granularity); + } } - lba += granularity - rem; + lba = 
map_index_to_lba(index + 1); } } @@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 1); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) @@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); - if (unmap && scsi_debug_unmap_granularity) { + if (unmap && scsi_debug_lbp()) { unmap_region(lba, num); goto out; } @@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, fake_storep + (lba * scsi_debug_sector_size), scsi_debug_sector_size); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); out: write_unlock_irqrestore(&atomic_rw, iflags); @@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void) /* Logical Block Provisioning */ if (scsi_debug_lbp()) { - unsigned int map_bytes; - scsi_debug_unmap_max_blocks = clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); @@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void) clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); if (scsi_debug_unmap_alignment && - scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { + scsi_debug_unmap_granularity <= + scsi_debug_unmap_alignment) { printk(KERN_ERR - "%s: ERR: unmap_granularity < unmap_alignment\n", + "%s: ERR: unmap_granularity <= unmap_alignment\n", __func__); return -EINVAL; } - map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); - map_bytes = map_size >> 3; - map_storep = vmalloc(map_bytes); + map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; + map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", map_size); @@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void) goto free_vm; } - memset(map_storep, 0x0, map_bytes); + bitmap_zero(map_storep, map_size); /* Map first 1KB for partition table */ if (scsi_debug_num_parts) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index c1b05a83d403..f43de1e56420 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; DECLARE_COMPLETION_ONSTACK(done); - unsigned long timeleft; + unsigned long timeleft = timeout; struct scsi_eh_save ses; + const unsigned long stall_for = msecs_to_jiffies(100); int rtn; +retry: scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); shost->eh_action = &done; scsi_log_send(scmd); scmd->scsi_done = scsi_eh_done; - shost->hostt->queuecommand(shost, scmd); - - timeleft = wait_for_completion_timeout(&done, timeout); + rtn = shost->hostt->queuecommand(shost, scmd); + if (rtn) { + if (timeleft > stall_for) { + scsi_eh_restore_cmnd(scmd, &ses); + timeleft -= stall_for; + msleep(jiffies_to_msecs(stall_for)); + goto retry; + } + /* signal not to enter either branch of the if () below */ + timeleft = 0; + rtn = NEEDS_RETRY; + } else { + timeleft = wait_for_completion_timeout(&done, timeout); + } shost->eh_action = NULL; - scsi_log_completion(scmd, SUCCESS); + 
scsi_log_completion(scmd, rtn); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, timeleft: %ld\n", __func__, scmd, timeleft)); /* - * If there is time left scsi_eh_done got called, and we will - * examine the actual status codes to see whether the command - * actually did complete normally, else tell the host to forget - * about this command. + * If there is time left scsi_eh_done got called, and we will examine + * the actual status codes to see whether the command actually did + * complete normally, else if we have a zero return and no time left, + * the command must still be pending, so abort it and return FAILED. + * If we never actually managed to issue the command, because + * ->queuecommand() kept returning non zero, use the rtn = FAILED + * value above (so don't execute either branch of the if) */ if (timeleft) { rtn = scsi_eh_completed_normally(scmd); @@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, rtn = FAILED; break; } - } else { + } else if (!rtn) { scsi_abort_eh_cmnd(scmd); rtn = FAILED; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c31187d79343..86d522004a20 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, } EXPORT_SYMBOL(scsi_execute); - -int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, +int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid) + int *resid, int flags) { char *sense = NULL; int result; @@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, return DRIVER_ERROR << 24; } result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, 0, resid); + sense, timeout, retries, flags, resid); if (sshdr) scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); kfree(sense); return result; } -EXPORT_SYMBOL(scsi_execute_req); +EXPORT_SYMBOL(scsi_execute_req_flags); /* * Function: scsi_init_cmd_errh() diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 8f6b12cbd224..42539ee2cb11 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev) #ifdef CONFIG_PM_RUNTIME +static int sdev_blk_runtime_suspend(struct scsi_device *sdev, + int (*cb)(struct device *)) +{ + int err; + + err = blk_pre_runtime_suspend(sdev->request_queue); + if (err) + return err; + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_suspend(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL; + struct scsi_device *sdev = to_scsi_device(dev); + int err; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_suspend(sdev, cb); + + err = scsi_dev_type_suspend(dev, cb); + if (err == -EAGAIN) + pm_schedule_suspend(dev, jiffies_to_msecs( + round_jiffies_up_relative(HZ/10))); + return err; +} + static int scsi_runtime_suspend(struct device *dev) { int err = 0; - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; dev_dbg(dev, "scsi_runtime_suspend\n"); - if (scsi_is_sdev_device(dev)) { - err = scsi_dev_type_suspend(dev, - pm ? 
pm->runtime_suspend : NULL); - if (err == -EAGAIN) - pm_schedule_suspend(dev, jiffies_to_msecs( - round_jiffies_up_relative(HZ/10))); - } + if (scsi_is_sdev_device(dev)) + err = sdev_runtime_suspend(dev); /* Insert hooks here for targets, hosts, and transport classes */ return err; } -static int scsi_runtime_resume(struct device *dev) +static int sdev_blk_runtime_resume(struct scsi_device *sdev, + int (*cb)(struct device *)) { int err = 0; + + blk_pre_runtime_resume(sdev->request_queue); + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_resume(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_resume(sdev, cb); + else + return scsi_dev_type_resume(dev, cb); +} + +static int scsi_runtime_resume(struct device *dev) +{ + int err = 0; dev_dbg(dev, "scsi_runtime_resume\n"); if (scsi_is_sdev_device(dev)) - err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL); + err = sdev_runtime_resume(dev); /* Insert hooks here for targets, hosts, and transport classes */ @@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev) /* Insert hooks here for targets, hosts, and transport classes */ - if (scsi_is_sdev_device(dev)) - err = pm_schedule_suspend(dev, 100); - else + if (scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) { + pm_runtime_mark_last_busy(dev); + err = pm_runtime_autosuspend(dev); + } else { + err = pm_runtime_suspend(dev); + } + } else { err = pm_runtime_suspend(dev); + } return err; } diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index db66357211ed..86f0c5d5c116 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -84,6 +84,7 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file) static const struct file_operations proc_scsi_fops = { .open = proc_scsi_host_open, + .release = single_release, .read = seq_read, .llseek = seq_lseek, .write = proc_scsi_host_write diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 47799a33d6ca..133926b1bb78 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1019,8 +1019,7 @@ exit_match_index: /** * iscsi_get_flashnode_by_index -finds flashnode session entry by index * @shost: pointer to host data - * @data: pointer to data containing value to use for comparison - * @fn: function pointer that does actual comparison + * @idx: index to match * * Finds the flashnode session object for the passed index * @@ -1029,13 +1028,13 @@ exit_match_index: * %NULL on failure */ static struct iscsi_bus_flash_session * -iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct device *dev; - dev = device_find_child(&shost->shost_gendev, data, fn); + dev = device_find_child(&shost->shost_gendev, &idx, + flashnode_match_index); if (dev) fnode_sess = iscsi_dev_to_flash_session(dev); @@ -1059,18 +1058,13 @@ struct device * iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, int (*fn)(struct device *dev, void *data)) { - struct device *dev; - - dev = 
device_find_child(&shost->shost_gendev, data, fn); - return dev; + return device_find_child(&shost->shost_gendev, data, fn); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); /** * iscsi_find_flashnode_conn - finds flashnode connection entry * @fnode_sess: pointer to parent flashnode session entry - * @data: pointer to data containing value to use for comparison - * @fn: function pointer that does actual comparison * * Finds the flashnode connection object comparing the data passed using logic * defined in passed function pointer @@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); * %NULL on failure */ struct device * -iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, - void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess) { - struct device *dev; - - dev = device_find_child(&fnode_sess->dev, data, fn); - return dev; + return device_find_child(&fnode_sess->dev, NULL, + iscsi_is_flashnode_conn_dev); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); @@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->set_flashnode_param) { @@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.set_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.set_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.set_flashnode.host_no); + __func__, idx, ev->u.set_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->del_flashnode) { @@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.del_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.del_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.del_flashnode.host_no); + __func__, idx, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } err = transport->del_flashnode(fnode_sess); + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->login_flashnode) { @@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, 
goto put_host; } - idx = &ev->u.login_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.login_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.login_flashnode.host_no); + __func__, idx, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->login_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->logout_flashnode) { @@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.logout_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.logout_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.logout_flashnode.host_no); + __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void) } iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); - if (!iscsi_eh_timer_workq) + if (!iscsi_eh_timer_workq) { + err = -ENOMEM; goto release_nls; + } return 0; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e6689776b4f6..c1c555242d0d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + const char *temp = "temporary "; int len; if (sdp->type != TYPE_DISK) @@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, * it's not worth the risk */ return -EINVAL; + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { + buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { len = strlen(sd_cache_types[i]); if (strncmp(sd_cache_types[i], buf, len) == 0 && @@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, return -EINVAL; rcd = ct & 0x01 ? 1 : 0; wce = ct & 0x02 ? 
1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + return count; + } + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, SD_MAX_RETRIES, &data, NULL)) return -EINVAL; @@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) sdev = sdkp->device; - retval = scsi_autopm_get_device(sdev); - if (retval) - goto error_autopm; - /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. @@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) return 0; error_out: - scsi_autopm_put_device(sdev); -error_autopm: scsi_disk_put(sdkp); return retval; } @@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) * XXX is followed by a "rmmod sd_mod"? */ - scsi_autopm_put_device(sdev); scsi_disk_put(sdkp); } @@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) retval = -ENODEV; if (scsi_block_when_processing_errors(sdp)) { - retval = scsi_autopm_get_device(sdp); - if (retval) - goto out; - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, sshdr); - scsi_autopm_put_device(sdp); } /* failed to execute TUR, assume media not present */ @@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. */ - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, + &sshdr, SD_FLUSH_TIMEOUT, + SD_MAX_RETRIES, NULL, REQ_PM); if (res == 0) break; } @@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; + + if (sdkp->cache_override) + return; + first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) @@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; + sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; @@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? 
"removable " : ""); + blk_pm_runtime_init(sdp->request_queue, dev); scsi_autopm_put_device(sdp); put_device(&sdkp->dev); } @@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) if (!scsi_device_online(sdp)) return -ENODEV; - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, + SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); if (res) { sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); sd_print_result(sdkp, res); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 74a1e4ca5401..2386aeb41fe8 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -73,6 +73,7 @@ struct scsi_disk { u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 04998f36e507..6174ca4ea275 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn) if (sdt->app_tag == 0xffff) return 0; - /* Bad ref tag received from disk */ - if (sdt->ref_tag == 0xffffffff) { - printk(KERN_ERR - "%s: bad phys ref tag on sector %lu\n", - bix->disk_name, (unsigned long)sector); - return -EIO; - } - if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { printk(KERN_ERR "%s: ref tag error on sector %lu (rcvd %u)\n", diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 0371047c5922..35faf24c6044 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI If you have a controller with this interface, say Y or M here. If unsure, say N. + +config SCSI_UFSHCD_PLATFORM + tristate "Platform bus based UFS Controller support" + depends on SCSI_UFSHCD + ---help--- + This selects the UFS host controller support. Select this if + you have an UFS controller on Platform bus. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 9eda0dfbd6df..1e5bd48457d6 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -1,3 +1,4 @@ # UFSHCD makefile obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o +obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c new file mode 100644 index 000000000000..03319acd9c72 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -0,0 +1,217 @@ +/* + * Universal Flash Storage Host controller Platform bus based glue driver + * + * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * See the COPYING file in the top-level directory or visit + * <http://www.gnu.org/licenses/gpl-2.0.html> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This program is provided "AS IS" and "WITH ALL FAULTS" and + * without warranty of any kind. You are solely responsible for + * determining the appropriateness of using and distributing + * the program and assume all risks associated with your exercise + * of rights with respect to the program, including but not limited + * to infringement of third party rights, the risks and costs of + * program errors, damage to or loss of data, programs or equipment, + * and unavailability or interruption of operations. Under no + * circumstances will the contributor of this Program be liable for + * any damages of any kind arising from your use or distribution of + * this program. + */ + +#include "ufshcd.h" +#include <linux/platform_device.h> + +#ifdef CONFIG_PM +/** + * ufshcd_pltfrm_suspend - suspend power management function + * @dev: pointer to device handle + * + * + * Returns 0 + */ +static int ufshcd_pltfrm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_suspend + * 2. Do bus specific power management + */ + + disable_irq(hba->irq); + + return 0; +} + +/** + * ufshcd_pltfrm_resume - resume power management function + * @dev: pointer to device handle + * + * Returns 0 + */ +static int ufshcd_pltfrm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_resume. + * 2. 
Do bus specific wake up + */ + + enable_irq(hba->irq); + + return 0; +} +#else +#define ufshcd_pltfrm_suspend NULL +#define ufshcd_pltfrm_resume NULL +#endif + +/** + * ufshcd_pltfrm_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_probe(struct platform_device *pdev) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + struct resource *mem_res; + struct resource *irq_res; + resource_size_t mem_size; + int err; + struct device *dev = &pdev->dev; + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) { + dev_err(&pdev->dev, + "Memory resource not available\n"); + err = -ENODEV; + goto out_error; + } + + mem_size = resource_size(mem_res); + if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) { + dev_err(&pdev->dev, + "Cannot reserve the memory resource\n"); + err = -EBUSY; + goto out_error; + } + + mmio_base = ioremap_nocache(mem_res->start, mem_size); + if (!mmio_base) { + dev_err(&pdev->dev, "memory map failed\n"); + err = -ENOMEM; + goto out_release_regions; + } + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + dev_err(&pdev->dev, "IRQ resource not available\n"); + err = -ENODEV; + goto out_iounmap; + } + + err = dma_set_coherent_mask(dev, dev->coherent_dma_mask); + if (err) { + dev_err(&pdev->dev, "set dma mask failed\n"); + goto out_iounmap; + } + + err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start); + if (err) { + dev_err(&pdev->dev, "Intialization failed\n"); + goto out_iounmap; + } + + platform_set_drvdata(pdev, hba); + + return 0; + +out_iounmap: + iounmap(mmio_base); +out_release_regions: + release_mem_region(mem_res->start, mem_size); +out_error: + return err; +} + +/** + * ufshcd_pltfrm_remove - remove platform driver routine + * @pdev: pointer to platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_remove(struct platform_device *pdev) +{ + struct resource *mem_res; + resource_size_t mem_size; + struct ufs_hba *hba = platform_get_drvdata(pdev); + + disable_irq(hba->irq); + + /* Some buggy controllers raise interrupt after + * the resources are removed. 
So first we unregister the + * irq handler and then the resources used by driver + */ + + free_irq(hba->irq, hba); + ufshcd_remove(hba); + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) + dev_err(&pdev->dev, "ufshcd: Memory resource not available\n"); + else { + mem_size = resource_size(mem_res); + release_mem_region(mem_res->start, mem_size); + } + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id ufs_of_match[] = { + { .compatible = "jedec,ufs-1.1"}, +}; + +static const struct dev_pm_ops ufshcd_dev_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, +}; + +static struct platform_driver ufshcd_pltfrm_driver = { + .probe = ufshcd_pltfrm_probe, + .remove = ufshcd_pltfrm_remove, + .driver = { + .name = "ufshcd", + .owner = THIS_MODULE, + .pm = &ufshcd_dev_pm_ops, + .of_match_table = ufs_of_match, + }, +}; + +module_platform_driver(ufshcd_pltfrm_driver); + +MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); +MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); +MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(UFSHCD_DRIVER_VERSION); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 60fd40c4e4c2..c32a478df81b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) ucd_cmd_ptr->header.dword_2 = 0; ucd_cmd_ptr->exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->transfersize); + cpu_to_be32(lrbp->cmd->sdb.length); memcpy(ucd_cmd_ptr->cdb, lrbp->cmd->cmnd, diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 787bd2c22bca..380387a47b1d 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master, } if (xfer->tx_buf) - spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); + if (xfer->bits_per_word > 8) + spi_writel(as, TDR, *(u16 *)(xfer->tx_buf)); + else + spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); else spi_writel(as, TDR, 0); dev_dbg(master->dev.parent, - " start pio xfer %p: len %u tx %p rx %p\n", - xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); + " start pio xfer %p: len %u tx %p rx %p bitpw %d\n", + xfer, xfer->len, xfer->tx_buf, xfer->rx_buf, + xfer->bits_per_word); /* Enable relevant interrupts */ spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); @@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer) { u8 *txp; u8 *rxp; + u16 *txp16; + u16 *rxp16; unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; if (xfer->rx_buf) { - rxp = ((u8 *)xfer->rx_buf) + xfer_pos; - *rxp = spi_readl(as, RDR); + if (xfer->bits_per_word > 8) { + rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); + *rxp16 = spi_readl(as, RDR); + } else { + rxp = ((u8 *)xfer->rx_buf) + xfer_pos; + *rxp = spi_readl(as, RDR); + } } else { spi_readl(as, RDR); } - - as->current_remaining_bytes--; + if (xfer->bits_per_word > 8) { + as->current_remaining_bytes -= 2; + if (as->current_remaining_bytes < 0) + as->current_remaining_bytes = 0; + } else { + as->current_remaining_bytes--; + } if (as->current_remaining_bytes) { if (xfer->tx_buf) { - txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; - spi_writel(as, TDR, *txp); + if (xfer->bits_per_word > 8) { + txp16 = (u16 *)(((u8 *)xfer->tx_buf) + + xfer_pos + 2); + spi_writel(as, TDR, *txp16); + } else { + txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; + spi_writel(as, TDR, 
*txp); + } } else { spi_writel(as, TDR, 0); } @@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) } } + if (xfer->bits_per_word > 8) { + if (xfer->len % 2) { + dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n"); + return -EINVAL; + } + } + /* FIXME implement these protocol options!! */ - if (xfer->speed_hz) { - dev_dbg(&spi->dev, "no protocol options yet\n"); + if (xfer->speed_hz < spi->max_speed_hz) { + dev_dbg(&spi->dev, "can't change speed in transfer\n"); return -ENOPROTOOPT; } diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 2e8f24a1fb95..50b13c9b1ab6 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = { }, { }, }; -MODULE_DEVICE_TABLE(of, davini_spi_of_match); +MODULE_DEVICE_TABLE(of, davinci_spi_of_match); /** * spi_davinci_get_pdata - Get platform data from DTS binding diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index 60cfae51c713..eab593eaaafa 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c @@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val) if ((mask & hspi_read(hspi, SPSR)) == val) return 0; - msleep(20); + udelay(10); } dev_err(hspi->dev, "timeout\n"); diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index d65c000efe35..09df8e22dba0 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c @@ -489,11 +489,6 @@ static int tegra_sflash_probe(struct platform_device *pdev) tegra_sflash_parse_dt(tsd); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "No IO memory resource\n"); - ret = -ENODEV; - goto exit_free_master; - } tsd->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(tsd->base)) { ret = PTR_ERR(tsd->base); diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 35f60bd252dd..637d728fbeb5 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) return 0; err_spi_register_master: - free_irq(board_dat->pdev->irq, board_dat); + free_irq(board_dat->pdev->irq, data); err_request_irq: pch_spi_free_resources(board_dat, data); err_spi_get_resources: @@ -1667,6 +1667,7 @@ static int pch_spi_probe(struct pci_dev *pdev, pd_dev = platform_device_alloc("pch-spi", i); if (!pd_dev) { dev_err(&pdev->dev, "platform_device_alloc failed\n"); + retval = -ENOMEM; goto err_platform_device; } pd_dev_save->pd_save[i] = pd_dev; diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index e1d769607425..34d18dcfa0db 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -267,7 +267,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) { struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); u32 ipif_ier; - u16 cr; /* We get here with transmitter inhibited */ @@ -276,7 +275,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) xspi->remaining_bytes = t->len; INIT_COMPLETION(xspi->done); - xilinx_spi_fill_tx_fifo(xspi); /* Enable the transmit empty interrupt, which we use to determine * progress on the transmission. 
@@ -285,12 +283,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, xspi->regs + XIPIF_V123B_IIER_OFFSET); - /* Start the transfer by not inhibiting the transmitter any longer */ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & - ~XSPI_CR_TRANS_INHIBIT; - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + for (;;) { + u16 cr; + u8 sr; + + xilinx_spi_fill_tx_fifo(xspi); + + /* Start the transfer by not inhibiting the transmitter any + * longer + */ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & + ~XSPI_CR_TRANS_INHIBIT; + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + + wait_for_completion(&xspi->done); + + /* A transmit has just completed. Process received data and + * check for more data to transmit. Always inhibit the + * transmitter while the Isr refills the transmit register/FIFO, + * or make sure it is stopped if we're done. + */ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, + xspi->regs + XSPI_CR_OFFSET); + + /* Read out all the data from the Rx FIFO */ + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { + xspi->rx_fn(xspi); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + } - wait_for_completion(&xspi->done); + /* See if there is more data to send */ + if (!xspi->remaining_bytes > 0) + break; + } /* Disable the transmit empty interrupt */ xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); @@ -314,38 +341,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ - u16 cr; - u8 sr; - - /* A transmit has just completed. Process received data and - * check for more data to transmit. Always inhibit the - * transmitter while the Isr refills the transmit register/FIFO, - * or make sure it is stopped if we're done. - */ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); - xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, - xspi->regs + XSPI_CR_OFFSET); - - /* Read out all the data from the Rx FIFO */ - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { - xspi->rx_fn(xspi); - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - } - - /* See if there is more data to send */ - if (xspi->remaining_bytes > 0) { - xilinx_spi_fill_tx_fifo(xspi); - /* Start the transfer by not inhibiting the - * transmitter any longer - */ - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - } else { - /* No more data to send. - * Indicate the transfer is completed. 
- */ - complete(&xspi->done); - } + complete(&xspi->done); } return IRQ_HANDLED; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 163fd802b7ac..32b7bb111eb6 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master) spi->dev.parent = &master->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; - spi->cs_gpio = -EINVAL; + spi->cs_gpio = -ENOENT; device_initialize(&spi->dev); return spi; } @@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master) nb = of_gpio_named_count(np, "cs-gpios"); master->num_chipselect = max(nb, (int)master->num_chipselect); - if (nb < 1) + /* Return error only for an incorrectly formed cs-gpios property */ + if (nb == 0 || nb == -ENOENT) return 0; + else if (nb < 0) + return nb; cs = devm_kzalloc(&master->dev, sizeof(int) * master->num_chipselect, @@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master) return -ENOMEM; for (i = 0; i < master->num_chipselect; i++) - cs[i] = -EINVAL; + cs[i] = -ENOENT; for (i = 0; i < nb; i++) cs[i] = of_get_named_gpio(np, "cs-gpios", i); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 4e8a1794f50a..aefe820a8005 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -72,10 +72,10 @@ source "drivers/staging/sep/Kconfig" source "drivers/staging/iio/Kconfig" -source "drivers/staging/zram/Kconfig" - source "drivers/staging/zsmalloc/Kconfig" +source "drivers/staging/zram/Kconfig" + source "drivers/staging/wlags49_h2/Kconfig" source "drivers/staging/wlags49_h25/Kconfig" diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c index ceb1c643753d..6dc27dac679d 100644 --- a/drivers/staging/android/alarm-dev.c +++ b/drivers/staging/android/alarm-dev.c @@ -264,6 +264,8 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } rv = alarm_do_ioctl(file, cmd, &ts); + if (rv) + return rv; switch (ANDROID_ALARM_BASE_CMD(cmd)) { case ANDROID_ALARM_GET_TIME(0): @@ -272,7 +274,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } - return rv; + return 0; } #ifdef CONFIG_COMPAT static long alarm_compat_ioctl(struct file *file, unsigned int cmd, @@ -295,6 +297,8 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd, } rv = alarm_do_ioctl(file, cmd, &ts); + if (rv) + return rv; switch (ANDROID_ALARM_BASE_CMD(cmd)) { case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */ @@ -303,7 +307,7 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd, break; } - return rv; + return 0; } #endif diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index b040200a5a55..9bd874789ce5 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -242,7 +242,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log, * 'log->buffer' which contains the first entry readable by 'euid' */ static size_t get_next_entry_by_uid(struct logger_log *log, - size_t off, uid_t euid) + size_t off, kuid_t euid) { while (off != log->w_off) { struct logger_entry *entry; @@ -251,7 +251,7 @@ static size_t get_next_entry_by_uid(struct logger_log *log, entry = get_entry_header(log, off, &scratch); - if (entry->euid == euid) + if (uid_eq(entry->euid, euid)) return off; next_len = sizeof(struct logger_entry) + entry->len; diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h index 
cc6bbd99c8e0..70af7d805dff 100644 --- a/drivers/staging/android/logger.h +++ b/drivers/staging/android/logger.h @@ -66,7 +66,7 @@ struct logger_entry { __s32 tid; __s32 sec; __s32 nsec; - uid_t euid; + kuid_t euid; char msg[0]; }; diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index 7871579bb83d..87e852a0ef49 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -981,6 +981,7 @@ config COMEDI_ME_DAQ config COMEDI_NI_6527 tristate "NI 6527 support" + depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for the National Instruments 6527 PCI card @@ -990,6 +991,7 @@ config COMEDI_NI_6527 config COMEDI_NI_65XX tristate "NI 65xx static dio PCI card support" + depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for National Instruments 65xx static dio boards. @@ -1003,6 +1005,7 @@ config COMEDI_NI_65XX config COMEDI_NI_660X tristate "NI 660x counter/timer PCI card support" + depends on HAS_DMA select COMEDI_NI_TIOCMD ---help--- Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602, @@ -1013,6 +1016,7 @@ config COMEDI_NI_660X config COMEDI_NI_670X tristate "NI 670x PCI card support" + depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for National Instruments PCI-6703 and PCI-6704 @@ -1022,6 +1026,7 @@ config COMEDI_NI_670X config COMEDI_NI_LABPC_PCI tristate "NI Lab-PC PCI-1200 support" + depends on HAS_DMA select COMEDI_NI_LABPC select COMEDI_MITE ---help--- @@ -1032,6 +1037,7 @@ config COMEDI_NI_LABPC_PCI config COMEDI_NI_PCIDIO tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support" + depends on HAS_DMA select COMEDI_MITE select COMEDI_8255 ---help--- @@ -1043,6 +1049,7 @@ config COMEDI_NI_PCIDIO config COMEDI_NI_PCIMIO tristate "NI PCI-MIO-E series and M series support" + depends on HAS_DMA select COMEDI_NI_TIOCMD select COMEDI_8255 select COMEDI_FC @@ -1095,10 +1102,12 @@ config COMEDI_SSV_DNP called ssv_dnp. 
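The new HAS_DMA dependencies above keep the MITE-based NI drivers out of kernels that cannot do DMA at all, and the comedi_buf.c hunk that follows guards the coherent-buffer allocation itself with IS_ENABLED(CONFIG_HAS_DMA) plus an #ifdef around the call. A minimal sketch of that guard pattern, using a hypothetical helper name (demo_alloc_buf_page is not part of comedi):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /*
     * Allocate one page for an acquisition buffer. On kernels built without
     * DMA support, refuse DMA-backed buffers and fall back to a plain
     * zeroed page for the non-DMA case.
     */
    static void *demo_alloc_buf_page(struct device *dev, dma_addr_t *dma_addr,
                                     bool want_dma)
    {
    #ifdef CONFIG_HAS_DMA
            if (want_dma)
                    return dma_alloc_coherent(dev, PAGE_SIZE, dma_addr,
                                              GFP_KERNEL | __GFP_COMP);
    #else
            if (want_dma)
                    return NULL;    /* dma_alloc_coherent() unavailable here */
    #endif
            return (void *)get_zeroed_page(GFP_KERNEL);
    }

The #ifdef matters because dma_alloc_coherent() may not even be available on architectures without HAS_DMA; the IS_ENABLED() test in the patch only covers the runtime diagnostic.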
config COMEDI_MITE + depends on HAS_DMA tristate config COMEDI_NI_TIOCMD tristate + depends on HAS_DMA select COMEDI_NI_TIO select COMEDI_MITE diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c index ca709901fb3e..d4be0e68509b 100644 --- a/drivers/staging/comedi/comedi_buf.c +++ b/drivers/staging/comedi/comedi_buf.c @@ -51,10 +51,12 @@ static void __comedi_buf_free(struct comedi_device *dev, clear_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags)); if (s->async_dma_dir != DMA_NONE) { +#ifdef CONFIG_HAS_DMA dma_free_coherent(dev->hw_dev, PAGE_SIZE, buf->virt_addr, buf->dma_addr); +#endif } else { free_page((unsigned long)buf->virt_addr); } @@ -74,6 +76,12 @@ static void __comedi_buf_alloc(struct comedi_device *dev, struct comedi_buf_page *buf; unsigned i; + if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) { + dev_err(dev->class_dev, + "dma buffer allocation not supported\n"); + return; + } + async->buf_page_list = vzalloc(sizeof(*buf) * n_pages); if (async->buf_page_list) pages = vmalloc(sizeof(struct page *) * n_pages); @@ -84,11 +92,15 @@ static void __comedi_buf_alloc(struct comedi_device *dev, for (i = 0; i < n_pages; i++) { buf = &async->buf_page_list[i]; if (s->async_dma_dir != DMA_NONE) +#ifdef CONFIG_HAS_DMA buf->virt_addr = dma_alloc_coherent(dev->hw_dev, PAGE_SIZE, &buf->dma_addr, GFP_KERNEL | __GFP_COMP); +#else + break; +#endif else buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL); if (!buf->virt_addr) diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 00f2547024ec..924c54c9c31f 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -246,9 +246,6 @@ static int resize_async_buffer(struct comedi_device *dev, return -EBUSY; } - if (!async->prealloc_buf) - return -EINVAL; - /* make sure buffer is an integral number of pages * (we round up) */ new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK; diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 3d978f34d212..77a7bb632580 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -976,8 +976,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma_chan); - set_dma_addr(devpriv->dma_chan, - virt_to_bus(devpriv->dma_buffer)); + set_dma_addr(devpriv->dma_chan, devpriv->dma_addr); /* set appropriate size of transfer */ devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd); if (cmd->stop_src == TRIG_COUNT && @@ -1089,7 +1088,7 @@ static void labpc_drain_dma(struct comedi_device *dev) devpriv->count -= num_points; /* set address and count for next transfer */ - set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer)); + set_dma_addr(devpriv->dma_chan, devpriv->dma_addr); set_dma_count(devpriv->dma_chan, leftover * sample_size); release_dma_lock(flags); @@ -1741,6 +1740,9 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it) unsigned long dma_flags; devpriv->dma_chan = dma_chan; + devpriv->dma_addr = + virt_to_bus(devpriv->dma_buffer); + dma_flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); set_dma_mode(devpriv->dma_chan, DMA_MODE_READ); diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h index 615f16f271c0..4b691f5a9965 100644 --- 
a/drivers/staging/comedi/drivers/ni_labpc.h +++ b/drivers/staging/comedi/drivers/ni_labpc.h @@ -82,6 +82,7 @@ struct labpc_private { unsigned int divisor_b1; unsigned int dma_chan; /* dma channel to use */ u16 *dma_buffer; /* buffer ai will dma into */ + phys_addr_t dma_addr; /* transfer size in bytes for current transfer */ unsigned int dma_transfer_size; /* we are using dma/fifo-half-full/etc. */ diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index a46d579016d9..8c5dee9b3b05 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -310,9 +310,11 @@ static int ni_gpct_insn_read(struct comedi_device *dev, static int ni_gpct_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); +#ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); +#endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void handle_gpct_interrupt(struct comedi_device *dev, @@ -4617,9 +4619,7 @@ static int ni_E_init(struct comedi_device *dev) for (j = 0; j < NUM_GPCT; ++j) { s = &dev->subdevices[NI_GPCT_SUBDEV(j)]; s->type = COMEDI_SUBD_COUNTER; - s->subdev_flags = - SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ - /* | SDF_CMD_WRITE */ ; + s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL; s->n_chan = 3; if (board->reg_type & ni_reg_m_series_mask) s->maxdata = 0xffffffff; @@ -4628,11 +4628,14 @@ static int ni_E_init(struct comedi_device *dev) s->insn_read = &ni_gpct_insn_read; s->insn_write = &ni_gpct_insn_write; s->insn_config = &ni_gpct_insn_config; +#ifdef PCIDMA + s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */; s->do_cmd = &ni_gpct_cmd; s->len_chanlist = 1; s->do_cmdtest = &ni_gpct_cmdtest; s->cancel = &ni_gpct_cancel; s->async_dma_dir = DMA_BIDIRECTIONAL; +#endif s->private = &devpriv->counter_dev->counters[j]; devpriv->counter_dev->counters[j].chip_index = 0; @@ -5216,10 +5219,10 @@ static int ni_gpct_insn_write(struct comedi_device *dev, return ni_tio_winsn(counter, insn, data); } +#ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int retval; -#ifdef PCIDMA struct ni_gpct *counter = s->private; /* const struct comedi_cmd *cmd = &s->async->cmd; */ @@ -5233,23 +5236,20 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); ni_e_series_enable_second_irq(dev, counter->counter_index, 1); retval = ni_tio_cmd(counter, s->async); -#else - retval = -ENOTSUPP; -#endif return retval; } +#endif +#ifdef PCIDMA static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { -#ifdef PCIDMA struct ni_gpct *counter = s->private; return ni_tio_cmdtest(counter, cmd); -#else return -ENOTSUPP; -#endif } +#endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { diff --git a/drivers/staging/dwc2/Kconfig b/drivers/staging/dwc2/Kconfig index f0b4739c65a1..d15d9d58e5ac 100644 --- a/drivers/staging/dwc2/Kconfig +++ b/drivers/staging/dwc2/Kconfig @@ -2,7 +2,6 @@ config USB_DWC2 tristate "DesignWare USB2 DRD Core Support" depends on USB depends on VIRT_TO_BUS - select USB_OTG_UTILS help Say Y or M here if your system has a Dual Role HighSpeed USB 
controller based on the DesignWare HSOTG IP Core. @@ -39,6 +38,7 @@ config USB_DWC2_TRACK_MISSED_SOFS bool "Enable Missed SOF Tracking" help Say Y here to enable logging of missed SOF events to the dmesg log. + WARNING: This feature is still experimental. If in doubt, say N. config USB_DWC2_DEBUG_PERIODIC diff --git a/drivers/staging/dwc2/hcd.c b/drivers/staging/dwc2/hcd.c index 827ab781ae9b..8551ccedf037 100644 --- a/drivers/staging/dwc2/hcd.c +++ b/drivers/staging/dwc2/hcd.c @@ -2804,9 +2804,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq, /* Set device flags indicating whether the HCD supports DMA */ if (hsotg->core_params->dma_enable > 0) { - if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0) - dev_warn(hsotg->dev, - "can't enable workaround for >2GB RAM\n"); + if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) + dev_warn(hsotg->dev, "can't set DMA mask\n"); if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0) dev_warn(hsotg->dev, "can't enable workaround for >2GB RAM\n"); diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/staging/dwc2/hcd_intr.c index 6e5dbed6ccec..e24062f0a49e 100644 --- a/drivers/staging/dwc2/hcd_intr.c +++ b/drivers/staging/dwc2/hcd_intr.c @@ -56,8 +56,6 @@ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) { #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS -#warning Compiling code to track missed SOFs - u16 curr_frame_number = hsotg->frame_number; if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c index 1f3d581a1078..44cce2fa6361 100644 --- a/drivers/staging/dwc2/platform.c +++ b/drivers/staging/dwc2/platform.c @@ -95,6 +95,14 @@ static int dwc2_driver_probe(struct platform_device *dev) hsotg->dev = &dev->dev; + /* + * Use reasonable defaults so platforms don't have to provide these. 
+ */ + if (!dev->dev.dma_mask) + dev->dev.dma_mask = &dev->dev.coherent_dma_mask; + if (!dev->dev.coherent_dma_mask) + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + irq = platform_get_irq(dev, 0); if (irq < 0) { dev_err(&dev->dev, "missing IRQ resource\n"); @@ -102,11 +110,6 @@ static int dwc2_driver_probe(struct platform_device *dev) } res = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&dev->dev, "missing memory base resource\n"); - return -EINVAL; - } - hsotg->regs = devm_ioremap_resource(&dev->dev, res); if (IS_ERR(hsotg->regs)) return PTR_ERR(hsotg->regs); diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig index 3c18efe31365..69059138de4a 100644 --- a/drivers/staging/gdm72xx/Kconfig +++ b/drivers/staging/gdm72xx/Kconfig @@ -39,7 +39,7 @@ if WIMAX_GDM72XX_USB config WIMAX_GDM72XX_USB_PM bool "Enable power managerment support" - depends on USB_SUSPEND + depends on PM_RUNTIME endif # WIMAX_GDM72XX_USB diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index 2856b8fd44ad..163c638e4095 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -690,7 +690,6 @@ static void mxs_lradc_trigger_remove(struct iio_dev *iio) static int mxs_lradc_buffer_preenable(struct iio_dev *iio) { struct mxs_lradc *lradc = iio_priv(iio); - struct iio_buffer *buffer = iio->buffer; int ret = 0, chan, ofs = 0; unsigned long enable = 0; uint32_t ctrl4_set = 0; @@ -698,7 +697,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) uint32_t ctrl1_irq = 0; const uint32_t chan_value = LRADC_CH_ACCUMULATE | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); - const int len = bitmap_weight(buffer->scan_mask, LRADC_MAX_TOTAL_CHANS); + const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS); if (!len) return -EINVAL; @@ -725,7 +724,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR); writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR); - for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) { + for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs); ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs); diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c index d060f2572512..c99f890cc6c6 100644 --- a/drivers/staging/iio/light/tsl2x7x_core.c +++ b/drivers/staging/iio/light/tsl2x7x_core.c @@ -1869,6 +1869,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp, dev_info(&chip->client->dev, "%s: i2c device found does not match expected id\n", __func__); + ret = -EINVAL; goto fail1; } @@ -1907,7 +1908,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp, if (ret) { dev_err(&clientp->dev, "%s: irq request failed", __func__); - goto fail2; + goto fail1; } } @@ -1920,17 +1921,17 @@ static int tsl2x7x_probe(struct i2c_client *clientp, if (ret) { dev_err(&clientp->dev, "%s: iio registration failed\n", __func__); - goto fail1; + goto fail2; } dev_info(&clientp->dev, "%s Light sensor found.\n", id->name); return 0; -fail1: +fail2: if (clientp->irq) free_irq(clientp->irq, indio_dev); -fail2: +fail1: iio_device_free(indio_dev); return ret; diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig index 8c9e40390f42..ef699f753186 100644 --- a/drivers/staging/imx-drm/Kconfig +++ b/drivers/staging/imx-drm/Kconfig @@ 
-1,6 +1,7 @@ config DRM_IMX tristate "DRM Support for Freescale i.MX" select DRM_KMS_HELPER + select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) @@ -19,10 +20,12 @@ config DRM_IMX_FB_HELPER config DRM_IMX_PARALLEL_DISPLAY tristate "Support for parallel displays" depends on DRM_IMX + select VIDEOMODE_HELPERS config DRM_IMX_TVE tristate "Support for TV and VGA displays" depends on DRM_IMX + select REGMAP_MMIO help Choose this to enable the internal Television Encoder (TVe) found on i.MX53 processors. @@ -30,6 +33,7 @@ config DRM_IMX_TVE config DRM_IMX_IPUV3_CORE tristate "IPUv3 core support" depends on DRM_IMX + depends on RESET_CONTROLLER help Choose this if you have a i.MX5/6 system and want to use the IPU. This option only enables IPU base @@ -38,5 +42,6 @@ config DRM_IMX_IPUV3_CORE config DRM_IMX_IPUV3 tristate "DRM Support for i.MX IPUv3" depends on DRM_IMX + depends on DRM_IMX_IPUV3_CORE help Choose this if you have a i.MX5 or i.MX6 processor. diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index ac1634464407..03892de9bd7e 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c @@ -670,7 +670,9 @@ static int imx_tve_probe(struct platform_device *pdev) tve->dac_reg = devm_regulator_get(&pdev->dev, "dac"); if (!IS_ERR(tve->dac_reg)) { regulator_set_voltage(tve->dac_reg, 2750000, 2750000); - regulator_enable(tve->dac_reg); + ret = regulator_enable(tve->dac_reg); + if (ret) + return ret; } tve->clk = devm_clk_get(&pdev->dev, "tve"); diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c index ea61c869110f..ff5c63350932 100644 --- a/drivers/staging/imx-drm/ipuv3-crtc.c +++ b/drivers/staging/imx-drm/ipuv3-crtc.c @@ -316,31 +316,14 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) { - struct drm_pending_vblank_event *e; - struct timeval now; unsigned long flags; struct drm_device *drm = ipu_crtc->base.dev; spin_lock_irqsave(&drm->event_lock, flags); - - e = ipu_crtc->page_flip_event; - if (!e) { - spin_unlock_irqrestore(&drm->event_lock, flags); - return; - } - - do_gettimeofday(&now); - e->event.sequence = 0; - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; + if (ipu_crtc->page_flip_event) + drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); ipu_crtc->page_flip_event = NULL; - imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); - - list_add_tail(&e->base.link, &e->base.file_priv->event_list); - - wake_up_interruptible(&e->base.file_priv->event_wait); - spin_unlock_irqrestore(&drm->event_lock, flags); } diff --git a/drivers/staging/media/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig index ec32776ff547..df6569b997b8 100644 --- a/drivers/staging/media/solo6x10/Kconfig +++ b/drivers/staging/media/solo6x10/Kconfig @@ -1,6 +1,7 @@ config SOLO6X10 tristate "Softlogic 6x10 MPEG codec cards" depends on PCI && VIDEO_DEV && SND && I2C + depends on FONTS select VIDEOBUF2_DMA_SG select VIDEOBUF2_DMA_CONTIG select SND_PCM diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index a88959f9a07a..197c393c4ca7 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c @@ -124,6 +124,20 @@ int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, EXPORT_SYMBOL_GPL(nvec_register_notifier); /** + * nvec_unregister_notifier - Unregister a notifier with nvec + * @nvec: A &struct nvec_chip + 
* @nb: The notifier block to unregister + * + * Unregisters a notifier with @nvec. The notifier will be removed from the + * atomic notifier chain. + */ +int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nvec->notifier_list, nb); +} +EXPORT_SYMBOL_GPL(nvec_unregister_notifier); + +/** * nvec_status_notifier - The final notifier * * Prints a message about control events not handled in the notifier @@ -185,7 +199,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec, * * Free the given message */ -inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) +void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) { if (msg != &nvec->tx_scratch) dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool); @@ -800,11 +814,6 @@ static int tegra_nvec_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "no mem resource?\n"); - return -ENODEV; - } - base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -815,7 +824,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) return -ENODEV; } - i2c_clk = clk_get(&pdev->dev, "div-clk"); + i2c_clk = devm_clk_get(&pdev->dev, "div-clk"); if (IS_ERR(i2c_clk)) { dev_err(nvec->dev, "failed to get controller clock\n"); return -ENODEV; @@ -902,8 +911,11 @@ static int tegra_nvec_remove(struct platform_device *pdev) nvec_toggle_global_events(nvec, false); mfd_remove_devices(nvec->dev); + nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier); cancel_work_sync(&nvec->rx_work); cancel_work_sync(&nvec->tx_work); + /* FIXME: needs check wether nvec is responsible for power off */ + pm_power_off = NULL; return 0; } diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h index b7a14bc0ab91..2b1316d87470 100644 --- a/drivers/staging/nvec/nvec.h +++ b/drivers/staging/nvec/nvec.h @@ -197,9 +197,8 @@ extern int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, unsigned int events); -extern int nvec_unregister_notifier(struct device *dev, - struct notifier_block *nb, - unsigned int events); +extern int nvec_unregister_notifier(struct nvec_chip *dev, + struct notifier_block *nb); extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg); diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c index 7445ce6422bb..a0ec52a4114f 100644 --- a/drivers/staging/nvec/nvec_kbd.c +++ b/drivers/staging/nvec/nvec_kbd.c @@ -169,8 +169,15 @@ fail: static int nvec_kbd_remove(struct platform_device *pdev) { + struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); + char disable_kbd[] = { NVEC_KBD, DISABLE_KBD }, + uncnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING, + false }; + nvec_write_async(nvec, uncnfg_wake_key_reporting, 3); + nvec_write_async(nvec, disable_kbd, 2); + nvec_unregister_notifier(nvec, &keys_dev.notifier); + input_unregister_device(keys_dev.input); - input_free_device(keys_dev.input); return 0; } @@ -188,4 +195,5 @@ module_platform_driver(nvec_kbd_driver); MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>"); MODULE_DESCRIPTION("NVEC keyboard driver"); +MODULE_ALIAS("platform:nvec-kbd"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c index 296f7b9a8c8c..aacfcd6954a3 100644 --- a/drivers/staging/nvec/nvec_power.c +++ b/drivers/staging/nvec/nvec_power.c @@ -414,6 +414,7 @@ static int 
nvec_power_remove(struct platform_device *pdev) struct nvec_power *power = platform_get_drvdata(pdev); cancel_delayed_work_sync(&power->poller); + nvec_unregister_notifier(power->nvec, &power->notifier); switch (pdev->id) { case AC: power_supply_unregister(&nvec_psy); diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c index aff6b9b9f9aa..06dbb02085a9 100644 --- a/drivers/staging/nvec/nvec_ps2.c +++ b/drivers/staging/nvec/nvec_ps2.c @@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev) struct serio *ser_dev; char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; - ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); if (ser_dev == NULL) return -ENOMEM; @@ -133,6 +133,11 @@ static int nvec_mouse_probe(struct platform_device *pdev) static int nvec_mouse_remove(struct platform_device *pdev) { + struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); + + ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE); + ps2_stopstreaming(ps2_dev.ser_dev); + nvec_unregister_notifier(nvec, &ps2_dev.notifier); serio_unregister_port(ps2_dev.ser_dev); return 0; @@ -179,4 +184,5 @@ module_platform_driver(nvec_mouse_driver); MODULE_DESCRIPTION("NVEC mouse driver"); MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>"); +MODULE_ALIAS("platform:nvec-mouse"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig index 185b676d858a..aab945a316ea 100644 --- a/drivers/staging/sep/Kconfig +++ b/drivers/staging/sep/Kconfig @@ -1,6 +1,6 @@ config DX_SEP tristate "Discretix SEP driver" - depends on PCI + depends on PCI && CRYPTO help Discretix SEP driver; used for the security processor subsystem on board the Intel Mobile Internet Device and adds SEP availability diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c index fe667dde43ce..386362c9964f 100644 --- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c +++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c @@ -1087,7 +1087,11 @@ static int synaptics_rmi4_resume(struct device *dev) unsigned char intr_status; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); - regulator_enable(rmi4_data->regulator); + retval = regulator_enable(rmi4_data->regulator); + if (retval) { + dev_err(dev, "Regulator enable failed (%d)\n", retval); + return retval; + } enable_irq(rmi4_data->i2c_client->irq); rmi4_data->touch_stopped = false; diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c index f4f1bf7a30fd..c699a3058b39 100644 --- a/drivers/staging/vt6656/hostap.c +++ b/drivers/staging/vt6656/hostap.c @@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - kfree(pDevice->apdev); + free_netdev(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = false; pDevice->bEnableHostWEP = false; diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c index c335808211ee..d0cf7d8a20e5 100644 --- a/drivers/staging/vt6656/iwctl.c +++ b/drivers/staging/vt6656/iwctl.c @@ -1345,9 +1345,12 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, return rc; } + spin_lock_irq(&pDevice->lock); + if (wrq->disabled) { pDevice->ePSMode = WMAC_POWER_CAM; PSvDisablePowerSaving(pDevice); + spin_unlock_irq(&pDevice->lock); return rc; } if ((wrq->flags & 
IW_POWER_TYPE) == IW_POWER_TIMEOUT) { @@ -1358,6 +1361,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, pDevice->ePSMode = WMAC_POWER_FAST; PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); } + + spin_unlock_irq(&pDevice->lock); + switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n"); diff --git a/drivers/staging/zcache/ramster.h b/drivers/staging/zcache/ramster.h index e1f91d5a0f6a..a858666eae68 100644 --- a/drivers/staging/zcache/ramster.h +++ b/drivers/staging/zcache/ramster.h @@ -11,10 +11,6 @@ #ifndef _ZCACHE_RAMSTER_H_ #define _ZCACHE_RAMSTER_H_ -#ifdef CONFIG_RAMSTER_MODULE -#define CONFIG_RAMSTER -#endif - #ifdef CONFIG_RAMSTER #include "ramster/ramster.h" #else diff --git a/drivers/staging/zcache/ramster/debug.c b/drivers/staging/zcache/ramster/debug.c index 327e4f0d98e1..5b26ee977c2f 100644 --- a/drivers/staging/zcache/ramster/debug.c +++ b/drivers/staging/zcache/ramster/debug.c @@ -1,6 +1,8 @@ #include <linux/atomic.h> #include "debug.h" +ssize_t ramster_foreign_eph_pages; +ssize_t ramster_foreign_pers_pages; #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> diff --git a/drivers/staging/zcache/ramster/ramster-howto.txt b/drivers/staging/zcache/ramster/ramster-howto.txt new file mode 100644 index 000000000000..7b1ee3bbfdd5 --- /dev/null +++ b/drivers/staging/zcache/ramster/ramster-howto.txt @@ -0,0 +1,366 @@ + RAMSTER HOW-TO + +Author: Dan Magenheimer +Ramster maintainer: Konrad Wilk <konrad.wilk@oracle.com> + +This is a HOWTO document for ramster which, as of this writing, is in +the kernel as a subdirectory of zcache in drivers/staging, called ramster. +(Zcache can be built with or without ramster functionality.) If enabled +and properly configured, ramster allows memory capacity load balancing +across multiple machines in a cluster. Further, the ramster code serves +as an example of asynchronous access for zcache (as well as cleancache and +frontswap) that may prove useful for future transcendent memory +implementations, such as KVM and NVRAM. While ramster works today on +any network connection that supports kernel sockets, its features may +become more interesting on future high-speed fabrics/interconnects. + +Ramster requires both kernel and userland support. The userland support, +called ramster-tools, is known to work with EL6-based distros, but is a +set of poorly-hacked slightly-modified cluster tools based on ocfs2, which +includes an init file, a config file, and a userland binary that interfaces +to the kernel. This state of userland support reflects the abysmal userland +skills of this suitably-embarrassed author; any help/patches to turn +ramster-tools into more distributable rpms/debs useful for a wider range +of distros would be appreciated. The source RPM that can be used as a +starting point is available at: + http://oss.oracle.com/projects/tmem/files/RAMster/ + +As a result of this author's ignorance, userland setup described in this +HOWTO assumes an EL6 distro and is described in EL6 syntax. Apologies +if this offends anyone! + +Kernel support has only been tested on x86_64. Systems with an active +ocfs2 filesystem should work, but since ramster leverages a lot of +code from ocfs2, there may be latent issues. A kernel configuration that +includes CONFIG_OCFS2_FS should build OK, and should certainly run OK +if no ocfs2 filesystem is mounted. 
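The HOWTO below drives ramster entirely through small sysfs and debugfs files, using echo and grep from a shell. Purely as an illustration of what those steps amount to (this helper is hypothetical and not part of ramster-tools), the same knobs can be written from C with nothing but stdio:

    /* hypothetical helper, equivalent to: echo <value> > <sysfs-file>
     * build: gcc -o ramster-knob ramster-knob.c
     */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
            FILE *f;

            if (argc != 3) {
                    fprintf(stderr, "usage: %s <sysfs-file> <value>\n", argv[0]);
                    return 1;
            }

            f = fopen(argv[1], "w");
            if (!f) {
                    perror(argv[1]);
                    return 1;
            }

            /* sysfs/debugfs attributes take a text value plus a newline */
            if (fprintf(f, "%s\n", argv[2]) < 0 || fclose(f) != 0) {
                    perror(argv[1]);
                    return 1;
            }
            return 0;
    }

Run as root; for example, ./ramster-knob /sys/kernel/mm/ramster/manual_node_up 0 does the same thing as the first echo in section C, step 4 below.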
+ +This HOWTO demonstrates memory capacity load balancing for a two-node +cluster, where one node called the "local" node becomes overcommitted +and the other node called the "remote" node provides additional RAM +capacity for use by the local node. Ramster is capable of more complex +topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES". + +If you find any terms in this HOWTO unfamiliar or don't understand the +motivation for ramster, the following LWN reading is recommended: +-- Transcendent Memory in a Nutshell (lwn.net/Articles/454795) +-- The future calculus of memory management (lwn.net/Articles/475681) +And since ramster is built on top of zcache, this article may be helpful: +-- In-kernel memory compression (lwn.net/Articles/545244) + +Now that you've memorized the contents of those articles, let's get started! + +A. PRELIMINARY + +1) Install two x86_64 Linux systems that are known to work when + upgraded to a recent upstream Linux kernel version. + +On each system: + +2) Configure, build and install, then boot Linux, just to ensure it + can be done with an unmodified upstream kernel. Confirm you booted + the upstream kernel with "uname -a". + +3) If you plan to do any performance testing or unless you plan to + test only swapping, the "WasActive" patch is also highly recommended. + (Search lkml.org for WasActive, apply the patch, rebuild your kernel.) + For a demo or simple testing, the patch can be ignored. + +4) Install ramster-tools as root. An x86_64 rpm for EL6-based systems + can be found at: + http://oss.oracle.com/projects/tmem/files/RAMster/ + (Sorry but for now, non-EL6 users must recreate ramster-tools on + their own from source. See above.) + +5) Ensure that debugfs is mounted at each boot. Examples below assume it + is mounted at /sys/kernel/debug. + +B. BUILDING RAMSTER INTO THE KERNEL + +Do the following on each system: + +1) Using the kernel configuration mechanism of your choice, change + your config to include: + + CONFIG_CLEANCACHE=y + CONFIG_FRONTSWAP=y + CONFIG_STAGING=y + CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m + CONFIG_ZCACHE=y + CONFIG_RAMSTER=y + + For a linux-3.10 or later kernel, you should also set: + + CONFIG_ZCACHE_DEBUG=y + CONFIG_RAMSTER_DEBUG=y + + Before building the kernel please doublecheck your kernel config + file to ensure all of the settings are correct. + +2) Build this kernel and change your boot file (e.g. /etc/grub.conf) + so that the new kernel will boot. + +3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel. + +4) Reboot each system approximately simultaneously. + +5) Check dmesg to ensure there are some messages from ramster, prefixed + by "ramster:" + + # dmesg | grep ramster + + You should also see a lot of files in: + + # ls /sys/kernel/debug/zcache + # ls /sys/kernel/debug/ramster + + These are mostly counters for various zcache and ramster activities. + You should also see files in: + + # ls /sys/kernel/mm/ramster + + These are sysfs files that control ramster as we shall see. + + Ramster now will act as a single-system zcache on each system + but doesn't yet know anything about the cluster so can't yet do + anything remotely. + +C. CONFIGURING THE RAMSTER CLUSTER + +This part can be error prone unless you are familiar with clustering +filesystems. We need to describe the cluster in a /etc/ramster.conf +file and the init scripts that parse it are extremely picky about +the syntax. + +1) Create a /etc/ramster.conf file and ensure it is identical on both + systems. 
This file mimics the ocfs2 format and there is a good amount + of documentation that can be searched for ocfs2.conf, but you can use: + + cluster: + name = ramster + node_count = 2 + node: + name = system1 + cluster = ramster + number = 0 + ip_address = my.ip.ad.r1 + ip_port = 7777 + node: + name = system2 + cluster = ramster + number = 1 + ip_address = my.ip.ad.r2 + ip_port = 7777 + + You must ensure that the "name" field in the file exactly matches + the output of "hostname" on each system; if "hostname" shows a + fully-qualified hostname, ensure the name is fully qualified in + /etc/ramster.conf. Obviously, substitute my.ip.ad.rx with proper + ip addresses. + +2) Enable the ramster service and configure it. If you used the + EL6 ramster-tools, this would be: + + # chkconfig --add ramster + # service ramster configure + + Set "load on boot" to "y", cluster to start is "ramster" (or whatever + name you chose in ramster.conf), heartbeat dead threshold as "500", + network idle timeout as "1000000". Leave the others as default. + +3) Reboot both systems. After reboot, try (assuming EL6 ramster-tools): + + # service ramster status + + You should see "Checking RAMSTER cluster "ramster": Online". If you do + not, something is wrong and ramster will not work. Note that you + should also see that the driver for "configfs" is loaded and mounted, + the driver for ocfs2_dlmfs is not loaded, and some numbers for network + parameters. You will also see "Checking RAMSTER heartbeat: Not active". + That's all OK. + +4) Now you need to start the cluster heartbeat; the cluster is not "up" + until all nodes detect a heartbeat. In a real cluster, heartbeat detection + is done via a cluster filesystem, but ramster doesn't require one. Some + hack-y kernel code in ramster can start the heartbeat for you though if + you tell it what nodes are "up". To enable the heartbeat, do: + + # echo 0 > /sys/kernel/mm/ramster/manual_node_up + # echo 1 > /sys/kernel/mm/ramster/manual_node_up + + This must be done on BOTH nodes and, to avoid timeouts, must be done + approximately concurrently on both nodes. On an EL6 system, it is + convenient to put these lines in /etc/rc.local. To confirm that the + cluster is now up, on both systems do: + + # dmesg | grep ramster + + You should see ramster "Accepted connection" messages in dmesg on both + nodes after this. Note that if you check userland status again with + + # service ramster status + + you will still see "Checking RAMSTER heartbeat: Not active". That's + still OK... the ramster kernel heartbeat hack doesn't communicate to + userland. + +5) You now must tell each node the node to which it should "remotify" pages. + On this two node cluster, we will assume the "local" node, node 0, has + memory overcommitted and will use ramster to utilize RAM capacity on + the "remote node", node 1. To configure this, on node 0, you do: + + # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum + + You should see "ramster: node 1 set as remotification target" in dmesg + on node 0. Again, on EL6, /etc/rc.local is a good place to put this + on node 0 so you don't forget to do it at each boot. + +6) One more step: By default, the ramster code does not "remotify" any + pages; this is primarily for testing purposes, but sometimes it is + useful. 
This may change in the future, but for now, on node 0, you do: + + # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable + # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable + + The first enables remotifying swap (persistent, aka frontswap) pages, + the second enables remotifying of page cache (ephemeral, cleancache) + pages. + + On EL6, these lines can also be put in /etc/rc.local (AFTER the + node_up lines), or at the beginning of a script that runs a workload. + +7) Note that most testing has been done with both/all machines booted + roughly simultaneously to avoid cluster timeouts. Ideally, you should + do this too unless you are trying to break ramster rather than just + use it. ;-) + +D. TESTING RAMSTER + +1) Note that ramster has no value unless pages get "remotified". For + swap/frontswap/persistent pages, this doesn't happen unless/until + the workload would cause swapping to occur, at which point pages + are put into frontswap/zcache, and the remotification thread starts + working. To get to the point where the system swaps, you either + need a workload for which the working set exceeds the RAM in the + system; or you need to somehow reduce the amount of RAM one of + the systems sees. This latter is easy when testing in a VM, but + harder on physical systems. In some cases, "mem=xxxM" on the + kernel command line restricts memory, but for some values of xxx + the kernel may fail to boot. One may also try creating a fixed + RAMdisk, doing nothing with it, but ensuring that it eats up a fixed + amount of RAM. + +2) To see if ramster is working, on the "remote node", node 1, try: + + # grep . /sys/kernel/debug/ramster/foreign_* + # # note, that is space-dot-space between grep and the pathname + + to monitor the number (and max) ephemeral and persistent pages + that ramster has sent. If these stay at zero, ramster is not working + either because the workload on the local node (node 0) isn't creating + enough memory pressure or because "remotifying" isn't working. On the + local system, node 0, you can watch lots of useful information also. + Try: + + grep . /sys/kernel/debug/zcache/*pageframes* \ + /sys/kernel/debug/zcache/*zbytes* \ + /sys/kernel/debug/zcache/*zpages* \ + /sys/kernel/debug/ramster/*remote* + + Of particular note are the remote_*_pages_succ_get counters. These + show how many disk reads and/or disk writes have been avoided on the + overcommitted local system by storing pages remotely using ramster. + + At the risk of information overload, you can also grep: + + /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/* + + These show, for example, how many disk reads and/or disk writes have + been avoided by using zcache to optimize RAM on the local system. + + +AUTOMATIC SWAP REPATRIATION + +You may notice that while the systems are idle, the foreign persistent +page count on the remote machine slowly decreases. This is because +ramster implements "frontswap selfshrinking": When possible, swap +pages that have been remotified are slowly repatriated to the local +machine. This is so that local RAM can be used when possible and +so that, in case of remote machine crash, the probability of loss +of data is reduced. + +REBOOTING / POWEROFF + +If a system is shut down while some of its swap pages still reside +on a remote system, the system may lock up during the shutdown +sequence. This will occur if the network is shut down before the +swap mechanism is shut down, which is the default ordering on many +distros.
To avoid this annoying problem, simply shut off the swap +subsystem before starting the shutdown sequence, e.g.: + + # swapoff -a + # reboot + +Ideally, this swapoff-before-ifdown ordering should be enforced permanently +using shutdown scripts. + +KNOWN PROBLEMS + +1) You may periodically see messages such as: + + ramster_r2net, message length problem + + This is harmless but indicates that a node is sending messages + containing compressed pages that exceed the maximum for zcache + (PAGE_SIZE*15/16). The sender side needs to be fixed. + +2) If you see a "No longer connected to node..." message or a "No connection + established with node X after N seconds", it is possible you may + be in an unrecoverable state. If you are certain all of the + appropriate cluster configuration steps described above have been + performed, try rebooting the two servers concurrently to see if + the cluster starts. + + Note that "Connection to node... shutdown, state 7" is an intermediate + connection state. As long as you later see "Accepted connection", the + intermediate states are harmless. + +3) There are known issues in counting certain values. As a result + you may see periodic warnings from the kernel. Almost always you + will see "ramster: bad accounting for XXX". There are also "WARN_ONCE" + messages. If you see kernel warnings with a tombstone, please report + them. They are harmless but reflect bugs that need to be eventually fixed. + +ADVANCED RAMSTER TOPOLOGIES + +The kernel code for ramster can support up to eight nodes in a cluster, +but no testing has been done with more than three nodes. + +In the example described above, the "remote" node serves as a RAM +overflow for the "local" node. This can be made symmetric by appropriate +settings of the sysfs remote_target_nodenum file. For example, by setting: + + # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 0, and + + # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 1, each node can serve as a RAM overflow for the other. + +For more than two nodes, a "RAM server" can be configured. For a +three node system, set: + + # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 1, and + + # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 2. Then node 0 is a RAM server for node 1 and node 2. + +In this implementation of ramster, any remote node is potentially a single +point of failure (SPOF). Though the probability of failure is reduced +by automatic swap repatriation (see above), a proposed future enhancement +to ramster improves high-availability for the cluster by sending a copy +of each page of data to two other nodes. Patches welcome! diff --git a/drivers/staging/zcache/ramster/ramster.c b/drivers/staging/zcache/ramster/ramster.c index b18b887db79f..a937ce1fa27a 100644 --- a/drivers/staging/zcache/ramster/ramster.c +++ b/drivers/staging/zcache/ramster/ramster.c @@ -66,8 +66,6 @@ static int ramster_remote_target_nodenum __read_mostly = -1; /* Used by this code. */ long ramster_flnodes; -ssize_t ramster_foreign_eph_pages; -ssize_t ramster_foreign_pers_pages; /* FIXME frontswap selfshrinking knobs in debugfs?
*/ static LIST_HEAD(ramster_rem_op_list); @@ -399,14 +397,18 @@ void ramster_count_foreign_pages(bool eph, int count) inc_ramster_foreign_eph_pages(); } else { dec_ramster_foreign_eph_pages(); +#ifdef CONFIG_RAMSTER_DEBUG WARN_ON_ONCE(ramster_foreign_eph_pages < 0); +#endif } } else { if (count > 0) { inc_ramster_foreign_pers_pages(); } else { dec_ramster_foreign_pers_pages(); +#ifdef CONFIG_RAMSTER_DEBUG WARN_ON_ONCE(ramster_foreign_pers_pages < 0); +#endif } } } diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 522cb8e55142..dcceed29d31a 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1922,15 +1922,15 @@ out: #ifdef CONFIG_ZCACHE_MODULE #ifdef CONFIG_RAMSTER -module_param(ramster_enabled, int, S_IRUGO); +module_param(ramster_enabled, bool, S_IRUGO); module_param(disable_frontswap_selfshrink, int, S_IRUGO); #endif -module_param(disable_cleancache, int, S_IRUGO); -module_param(disable_frontswap, int, S_IRUGO); +module_param(disable_cleancache, bool, S_IRUGO); +module_param(disable_frontswap, bool, S_IRUGO); #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS module_param(frontswap_has_exclusive_gets, bool, S_IRUGO); #endif -module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO); +module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO); module_param(zcache_comp_name, charp, S_IRUGO); module_init(zcache_init); MODULE_LICENSE("GPL"); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index ffbc6a94be52..d7705e5824fb 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -651,7 +651,7 @@ static int iscsit_add_reject( cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); - iscsit_release_cmd(cmd); + iscsit_free_cmd(cmd, false); return -1; } @@ -697,7 +697,7 @@ int iscsit_add_reject_from_cmd( cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); - iscsit_release_cmd(cmd); + iscsit_free_cmd(cmd, false); return -1; } @@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg( static void iscsit_do_crypto_hash_buf( struct hash_desc *hash, - unsigned char *buf, + const void *buf, u32 payload_length, u32 padding, u8 *pad_bytes, @@ -1743,7 +1743,7 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, return 0; out: if (cmd) - iscsit_release_cmd(cmd); + iscsit_free_cmd(cmd, false); ping_out: kfree(ping_data); return ret; @@ -2251,7 +2251,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { pr_err("Received logout request on connection that" " is not in logged in state, ignoring request.\n"); - iscsit_release_cmd(cmd); + iscsit_free_cmd(cmd, false); return 0; } @@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 
*)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0], + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3040,9 +3035,8 @@ static int iscsit_send_r2t( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3557,11 +3548,11 @@ 
static int iscsit_send_reject( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { - u32 iov_count = 0, tx_size = 0; - struct iscsi_reject *hdr; + struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; struct kvec *iov; + u32 iov_count = 0, tx_size; - iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); + iscsit_build_reject(cmd, conn, hdr); iov = &cmd->iov_misc[0]; iov[iov_count].iov_base = cmd->pdu; @@ -3574,9 +3565,8 @@ static int iscsit_send_reject( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3585,9 +3575,8 @@ static int iscsit_send_reject( } if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)&cmd->data_crc); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); iov[iov_count].iov_base = &cmd->data_crc; iov[iov_count++].iov_len = ISCSI_CRC_LEN; @@ -3676,7 +3665,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, false); break; case ISTATE_SEND_NOPIN_WANT_RESPONSE: iscsit_mod_nopin_response_timer(conn); @@ -4133,7 +4122,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) iscsit_increment_maxcmdsn(cmd, sess); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); } diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 7816af6cdd12..40d9dbca987b 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn( /* * CmdSN is greater than the tail of the list. 
*/ - if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) + if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn)) list_add_tail(&ooo_cmdsn->ooo_list, &sess->sess_ooo_cmdsn_list); else { @@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn( */ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, ooo_list) { - if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) + if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn)) continue; + /* Insert before this entry */ list_add(&ooo_cmdsn->ooo_list, - &ooo_tmp->ooo_list); + ooo_tmp->ooo_list.prev); break; } } diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index ba6091bf93fc..45a5afd5ea13 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -143,7 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -165,7 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -248,7 +248,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( iscsit_remove_cmd_from_connection_recovery(cmd, sess); spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -302,7 +302,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); continue; } @@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); continue; } diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index f690be9e5293..e38222191a33 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) /* * Extra parameters for ISER from RFC-5046 */ - param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, + param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS, PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND, USE_LEADING_ONLY); if (!param) @@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate( SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, OFMARKINT)) { SET_PSTATE_NEGOTIATE(param); - } else if (!strcmp(param->name, RDMAEXTENTIONS)) { + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { if (iser == true) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) 
{ @@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery( param->state &= ~PSTATE_NEGOTIATE; else if (!strcmp(param->name, OFMARKINT)) param->state &= ~PSTATE_NEGOTIATE; - else if (!strcmp(param->name, RDMAEXTENTIONS)) + else if (!strcmp(param->name, RDMAEXTENSIONS)) param->state &= ~PSTATE_NEGOTIATE; else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) param->state &= ~PSTATE_NEGOTIATE; @@ -758,9 +758,9 @@ static int iscsi_add_notunderstood_response( } INIT_LIST_HEAD(&extra_response->er_list); - strncpy(extra_response->key, key, strlen(key) + 1); - strncpy(extra_response->value, NOTUNDERSTOOD, - strlen(NOTUNDERSTOOD) + 1); + strlcpy(extra_response->key, key, sizeof(extra_response->key)); + strlcpy(extra_response->value, NOTUNDERSTOOD, + sizeof(extra_response->value)); list_add_tail(&extra_response->er_list, ¶m_list->extra_response_list); @@ -1629,8 +1629,6 @@ int iscsi_decode_text_input( if (phase & PHASE_SECURITY) { if (iscsi_check_for_auth_key(key) > 0) { - char *tmpptr = key + strlen(key); - *tmpptr = '='; kfree(tmpbuf); return 1; } @@ -1977,7 +1975,7 @@ void iscsi_set_session_parameters( ops->SessionType = !strcmp(param->value, DISCOVERY); pr_debug("SessionType: %s\n", param->value); - } else if (!strcmp(param->name, RDMAEXTENTIONS)) { + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { ops->RDMAExtensions = !strcmp(param->value, YES); pr_debug("RDMAExtensions: %s\n", param->value); diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index f31b9c4b83f2..a47046a752aa 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h @@ -1,8 +1,10 @@ #ifndef ISCSI_PARAMETERS_H #define ISCSI_PARAMETERS_H +#include <scsi/iscsi_proto.h> + struct iscsi_extra_response { - char key[64]; + char key[KEY_MAXLEN]; char value[32]; struct list_head er_list; } ____cacheline_aligned; @@ -91,7 +93,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, /* * Parameter names of iSCSI Extentions for RDMA (iSER). 
See RFC-5046 */ -#define RDMAEXTENTIONS "RDMAExtensions" +#define RDMAEXTENSIONS "RDMAExtensions" #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" #define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" @@ -142,7 +144,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, /* * Initial values for iSER parameters following RFC-5046 Section 6 */ -#define INITIAL_RDMAEXTENTIONS NO +#define INITIAL_RDMAEXTENSIONS NO #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" #define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 2cc6c9a3ffb8..08a3bacef0c5 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -676,40 +676,56 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) void iscsit_release_cmd(struct iscsi_cmd *cmd) { - struct iscsi_conn *conn = cmd->conn; - - iscsit_free_r2ts_from_list(cmd); - iscsit_free_all_datain_reqs(cmd); - kfree(cmd->buf_ptr); kfree(cmd->pdu_list); kfree(cmd->seq_list); kfree(cmd->tmr_req); kfree(cmd->iov_data); - if (conn) { + kmem_cache_free(lio_cmd_cache, cmd); +} + +static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, + bool check_queues) +{ + struct iscsi_conn *conn = cmd->conn; + + if (scsi_cmd) { + if (cmd->data_direction == DMA_TO_DEVICE) { + iscsit_stop_dataout_timer(cmd); + iscsit_free_r2ts_from_list(cmd); + } + if (cmd->data_direction == DMA_FROM_DEVICE) + iscsit_free_all_datain_reqs(cmd); + } + + if (conn && check_queues) { iscsit_remove_cmd_from_immediate_queue(cmd, conn); iscsit_remove_cmd_from_response_queue(cmd, conn); } - - kmem_cache_free(lio_cmd_cache, cmd); } -void iscsit_free_cmd(struct iscsi_cmd *cmd) +void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) { + struct se_cmd *se_cmd = NULL; + int rc; /* * Determine if a struct se_cmd is associated with * this struct iscsi_cmd. */ switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: - if (cmd->data_direction == DMA_TO_DEVICE) - iscsit_stop_dataout_timer(cmd); + se_cmd = &cmd->se_cmd; + __iscsit_free_cmd(cmd, true, shutdown); /* * Fallthrough */ case ISCSI_OP_SCSI_TMFUNC: - transport_generic_free_cmd(&cmd->se_cmd, 1); + rc = transport_generic_free_cmd(&cmd->se_cmd, 1); + if (!rc && shutdown && se_cmd && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, true, shutdown); + target_put_sess_cmd(se_cmd->se_sess, se_cmd); + } break; case ISCSI_OP_REJECT: /* @@ -718,11 +734,19 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd) * associated cmd->se_cmd needs to be released. 
*/ if (cmd->se_cmd.se_tfo != NULL) { - transport_generic_free_cmd(&cmd->se_cmd, 1); + se_cmd = &cmd->se_cmd; + __iscsit_free_cmd(cmd, true, shutdown); + + rc = transport_generic_free_cmd(&cmd->se_cmd, 1); + if (!rc && shutdown && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, true, shutdown); + target_put_sess_cmd(se_cmd->se_sess, se_cmd); + } break; } /* Fall-through */ default: + __iscsit_free_cmd(cmd, false, shutdown); cmd->release_cmd(cmd); break; } diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 4f8e01a47081..a4422659d049 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -29,7 +29,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); -extern void iscsit_free_cmd(struct iscsi_cmd *); +extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); extern void iscsit_inc_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 43b7ac6c5b1c..4a8bd36d3958 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { .store = target_core_store_dev_udev_path, }; +static ssize_t target_core_show_dev_enable(void *p, char *page) +{ + struct se_device *dev = p; + + return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); +} + static ssize_t target_core_store_dev_enable( void *p, const char *page, @@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable( static struct target_core_configfs_attribute target_core_attr_dev_enable = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "enable", - .ca_mode = S_IWUSR }, - .show = NULL, + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_enable, .store = target_core_store_dev_enable, }; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 2e4d655471bc..4630481b6043 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) struct se_dev_entry *deve = se_cmd->se_deve; deve->total_cmds++; - deve->total_bytes += se_cmd->data_length; if ((se_cmd->data_direction == DMA_TO_DEVICE) && (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { @@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) else if (se_cmd->data_direction == DMA_FROM_DEVICE) deve->read_bytes += se_cmd->data_length; - deve->deve_cmds++; - se_lun = deve->se_lun; se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; @@ -275,17 +272,6 @@ int core_free_device_list_for_node( return 0; } -void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) -{ - struct se_dev_entry *deve; - unsigned long flags; - - spin_lock_irqsave(&se_nacl->device_list_lock, flags); - deve = se_nacl->device_list[se_cmd->orig_fe_lun]; - deve->deve_cmds--; - spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); -} - void core_update_device_list_access( u32 mapped_lun, u32 lun_access, diff --git 
a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 58ed683e04ae..b11890d85120 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -153,10 +153,7 @@ static int fd_configure_device(struct se_device *dev) struct request_queue *q = bdev_get_queue(inode->i_bdev); unsigned long long dev_size; - dev->dev_attrib.hw_block_size = - bdev_logical_block_size(inode->i_bdev); - dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); - + fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); /* * Determine the number of bytes from i_size_read() minus * one (1) logical sector from underlying struct block_device @@ -203,9 +200,7 @@ static int fd_configure_device(struct se_device *dev) goto fail; } - dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; - + fd_dev->fd_block_size = FD_BLOCKSIZE; /* * Limit UNMAP emulation to 8k Number of LBAs (NoLB) */ @@ -224,8 +219,8 @@ static int fd_configure_device(struct se_device *dev) dev->dev_attrib.max_write_same_len = 0x1000; } - fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; - + dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; + dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { @@ -699,11 +694,12 @@ static sector_t fd_get_blocks(struct se_device *dev) * to handle underlying block_device resize operations. */ if (S_ISBLK(i->i_mode)) - dev_size = (i_size_read(i) - fd_dev->fd_block_size); + dev_size = i_size_read(i); else dev_size = fd_dev->fd_dev_size; - return div_u64(dev_size, dev->dev_attrib.block_size); + return div_u64(dev_size - dev->dev_attrib.block_size, + dev->dev_attrib.block_size); } static struct sbc_ops fd_sbc_ops = { diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 07f5f94634bb..aa1620abec6d 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd) rw = WRITE_FUA; else if (!(q->flush_flags & REQ_FLUSH)) rw = WRITE_FUA; + else + rw = WRITE; } else { rw = WRITE; } diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 853bab60e362..18d49df4d0ac 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp; struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); int core_free_device_list_for_node(struct se_node_acl *, struct se_portal_group *); -void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); void core_update_device_list_access(u32, u32, struct se_node_acl *); int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32, u32, struct se_node_acl *, struct se_portal_group *); diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index e0b3c379aa14..0921a64b5550 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd) u32 src_len; u64 tmp; + if (dev->rd_flags & RDF_NULLIO) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; rd_offset = do_div(tmp, PAGE_SIZE); rd_page = tmp; @@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd) } enum { - Opt_rd_pages, Opt_err + Opt_rd_pages, Opt_rd_nullio, Opt_err }; static match_table_t tokens = 
{ {Opt_rd_pages, "rd_pages=%d"}, + {Opt_rd_nullio, "rd_nullio=%d"}, {Opt_err, NULL} }; @@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev, " Count: %u\n", rd_dev->rd_page_count); rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; break; + case Opt_rd_nullio: + match_int(args, &arg); + if (arg != 1) + break; + + pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg); + rd_dev->rd_flags |= RDF_NULLIO; + break; default: break; } @@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", rd_dev->rd_dev_id); bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" - " SG_table_count: %u\n", rd_dev->rd_page_count, - PAGE_SIZE, rd_dev->sg_table_count); + " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count, + PAGE_SIZE, rd_dev->sg_table_count, + !!(rd_dev->rd_flags & RDF_NULLIO)); return bl; } diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 933b38b6e563..1789d1e14395 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -22,6 +22,7 @@ struct rd_dev_sg_table { } ____cacheline_aligned; #define RDF_HAS_PAGE_COUNT 0x01 +#define RDF_NULLIO 0x02 struct rd_dev { struct se_device dev; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f8388b4024aa..21e315874a54 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -65,7 +65,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); static int transport_generic_get_mem(struct se_cmd *cmd); -static void transport_put_cmd(struct se_cmd *cmd); +static int transport_put_cmd(struct se_cmd *cmd); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) @@ -221,6 +221,7 @@ struct se_session *transport_init_session(void) INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); INIT_LIST_HEAD(&se_sess->sess_cmd_list); + INIT_LIST_HEAD(&se_sess->sess_wait_list); spin_lock_init(&se_sess->sess_cmd_lock); kref_init(&se_sess->sess_kref); @@ -1943,7 +1944,7 @@ static inline void transport_free_pages(struct se_cmd *cmd) * This routine unconditionally frees a command, and reference counting * or list removal must be done in the caller. */ -static void transport_release_cmd(struct se_cmd *cmd) +static int transport_release_cmd(struct se_cmd *cmd) { BUG_ON(!cmd->se_tfo); @@ -1955,11 +1956,11 @@ static void transport_release_cmd(struct se_cmd *cmd) * If this cmd has been setup with target_get_sess_cmd(), drop * the kref and call ->release_cmd() in kref callback. */ - if (cmd->check_release != 0) { - target_put_sess_cmd(cmd->se_sess, cmd); - return; - } + if (cmd->check_release != 0) + return target_put_sess_cmd(cmd->se_sess, cmd); + cmd->se_tfo->release_cmd(cmd); + return 1; } /** @@ -1968,7 +1969,7 @@ static void transport_release_cmd(struct se_cmd *cmd) * * This routine releases our reference to the command and frees it if possible. 
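The new rd_nullio option above is parsed through the kernel's generic token parser. The fragment below is a minimal sketch of that linux/parser.h pattern and is not part of the patch; the option name, flag value and helper are invented for illustration.

#include <linux/parser.h>
#include <linux/types.h>

enum { Opt_nullio, Opt_err };

static match_table_t example_tokens = {
	{Opt_nullio, "nullio=%d"},
	{Opt_err, NULL}
};

/* Parse one "key=value" token from a configfs-style option string. */
static void example_parse_token(char *p, u32 *flags)
{
	substring_t args[MAX_OPT_ARGS];
	int arg;

	switch (match_token(p, example_tokens, args)) {
	case Opt_nullio:
		/* match_int() returns 0 on success and fills in the %d capture. */
		if (!match_int(&args[0], &arg) && arg == 1)
			*flags |= 0x02;	/* e.g. a flag like RDF_NULLIO */
		break;
	default:
		break;
	}
}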
*/ -static void transport_put_cmd(struct se_cmd *cmd) +static int transport_put_cmd(struct se_cmd *cmd) { unsigned long flags; @@ -1976,7 +1977,7 @@ static void transport_put_cmd(struct se_cmd *cmd) if (atomic_read(&cmd->t_fe_count) && !atomic_dec_and_test(&cmd->t_fe_count)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; + return 0; } if (cmd->transport_state & CMD_T_DEV_ACTIVE) { @@ -1986,8 +1987,7 @@ static void transport_put_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_free_pages(cmd); - transport_release_cmd(cmd); - return; + return transport_release_cmd(cmd); } void *transport_kmap_data_sg(struct se_cmd *cmd) @@ -2152,24 +2152,25 @@ static void transport_write_pending_qf(struct se_cmd *cmd) } } -void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) +int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { + int ret = 0; + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) transport_wait_for_tasks(cmd); - transport_release_cmd(cmd); + ret = transport_release_cmd(cmd); } else { if (wait_for_tasks) transport_wait_for_tasks(cmd); - core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); - if (cmd->se_lun) transport_lun_remove_cmd(cmd); - transport_put_cmd(cmd); + ret = transport_put_cmd(cmd); } + return ret; } EXPORT_SYMBOL(transport_generic_free_cmd); @@ -2213,21 +2214,19 @@ static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; - unsigned long flags; - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); se_cmd->se_tfo->release_cmd(se_cmd); return; } if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); complete(&se_cmd->cmd_wait_comp); return; } list_del(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2238,7 +2237,8 @@ static void target_release_cmd_kref(struct kref *kref) */ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) { - return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); + return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, + &se_sess->sess_cmd_lock); } EXPORT_SYMBOL(target_put_sess_cmd); @@ -2253,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) unsigned long flags; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - - WARN_ON(se_sess->sess_tearing_down); + if (se_sess->sess_tearing_down) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + return; + } se_sess->sess_tearing_down = 1; + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); - list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) se_cmd->cmd_wait_set = 1; spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); @@ -2266,44 +2269,32 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); /* target_wait_for_sess_cmds - Wait for outstanding descriptors * @se_sess: session to wait for active I/O - * @wait_for_tasks: Make extra transport_wait_for_tasks call */ -void target_wait_for_sess_cmds( - struct se_session *se_sess, - int 
wait_for_tasks) +void target_wait_for_sess_cmds(struct se_session *se_sess) { struct se_cmd *se_cmd, *tmp_cmd; - bool rc = false; + unsigned long flags; list_for_each_entry_safe(se_cmd, tmp_cmd, - &se_sess->sess_cmd_list, se_cmd_list) { + &se_sess->sess_wait_list, se_cmd_list) { list_del(&se_cmd->se_cmd_list); pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" " %d\n", se_cmd, se_cmd->t_state, se_cmd->se_tfo->get_cmd_state(se_cmd)); - if (wait_for_tasks) { - pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - - rc = transport_wait_for_tasks(se_cmd); - - pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - } - - if (!rc) { - wait_for_completion(&se_cmd->cmd_wait_comp); - pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - } + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); se_cmd->se_tfo->release_cmd(se_cmd); } + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + WARN_ON(!list_empty(&se_sess->sess_cmd_list)); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + } EXPORT_SYMBOL(target_wait_for_sess_cmds); diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index 5b4d75fd7b49..54ffd64ca3f7 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c @@ -169,21 +169,11 @@ static int armada_thermal_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Failed to get platform resource\n"); - return -ENODEV; - } - priv->sensor = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->sensor)) return PTR_ERR(priv->sensor); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "Failed to get platform resource\n"); - return -ENODEV; - } - priv->control = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->control)) return PTR_ERR(priv->control); diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index 4b15a5f270dc..a088d1365ca5 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c @@ -149,10 +149,6 @@ static int dove_thermal_probe(struct platform_device *pdev) return PTR_ERR(priv->sensor); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "Failed to get platform resource\n"); - return -ENODEV; - } priv->control = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->control)) return PTR_ERR(priv->control); diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c index d20ce9e61403..788b1ddcac6c 100644 --- a/drivers/thermal/exynos_thermal.c +++ b/drivers/thermal/exynos_thermal.c @@ -925,11 +925,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) INIT_WORK(&data->irq_work, exynos_tmu_work); data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!data->mem) { - dev_err(&pdev->dev, "Failed to get platform resource\n"); - return -ENOENT; - } - data->base = devm_ioremap_resource(&pdev->dev, data->mem); if (IS_ERR(data->base)) return PTR_ERR(data->base); diff --git a/drivers/tty/ehv_bytechan.c 
b/drivers/tty/ehv_bytechan.c index 6d0c27cd03da..9bffcec5ad82 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c @@ -859,6 +859,7 @@ error: */ static void __exit ehv_bc_exit(void) { + platform_driver_unregister(&ehv_bc_tty_driver); tty_unregister_driver(ehv_bc_driver); put_tty_driver(ehv_bc_driver); kfree(bcs); diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 71d6eb2c93b1..4c4a23674569 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -1618,8 +1618,12 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) if (ip->type == PORT_16550A) me->fifo[p] = 1; - opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); - opmode &= OP_MODE_MASK; + if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) { + opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); + opmode &= OP_MODE_MASK; + } else { + opmode = RS232_MODE; + } me->iftype[p] = opmode; mutex_unlock(&port->mutex); } @@ -1676,6 +1680,9 @@ static int mxser_ioctl(struct tty_struct *tty, int shiftbit; unsigned char val, mask; + if (info->board->chip_flag != MOXA_MUST_MU860_HWID) + return -EFAULT; + p = tty->index % 4; if (cmd == MOXA_SET_OP_MODE) { if (get_user(opmode, (int __user *) argp)) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index d655416087b7..6c7fe90ad72d 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1573,6 +1573,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) ldata->real_raw = 0; } n_tty_set_room(tty); + /* + * Fix tty hang when I_IXON(tty) is cleared, but the tty + * been stopped by STOP_CHAR(tty) before it. + */ + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { + start_tty(tty); + } + /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index 82d35c5a58fd..354564ea47c5 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -150,12 +150,14 @@ static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = { AIOP_INTR_BIT_3 }; +#ifdef CONFIG_PCI static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = { UPCI_AIOP_INTR_BIT_0, UPCI_AIOP_INTR_BIT_1, UPCI_AIOP_INTR_BIT_2, UPCI_AIOP_INTR_BIT_3 }; +#endif static Byte_t RData[RDATASIZE] = { 0x00, 0x09, 0xf6, 0x82, @@ -227,7 +229,6 @@ static unsigned long nextLineNumber; static int __init init_ISA(int i); static void rp_wait_until_sent(struct tty_struct *tty, int timeout); static void rp_flush_buffer(struct tty_struct *tty); -static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model); static unsigned char GetLineNumber(int ctrl, int aiop, int ch); static unsigned char SetLineNumber(int ctrl, int aiop, int ch); static void rp_start(struct tty_struct *tty); @@ -241,11 +242,6 @@ static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags); static void sModemReset(CONTROLLER_T * CtlP, int chan, int on); static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on); static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data); -static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, - ByteIO_t * AiopIOList, int AiopIOListSize, - WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, - int PeriodicOnly, int altChanRingIndicator, - int UPCIRingInd); static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, ByteIO_t * AiopIOList, int AiopIOListSize, int IRQNum, Byte_t Frequency, int PeriodicOnly); @@ -1775,6 +1771,145 @@ static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = { }; MODULE_DEVICE_TABLE(pci, rocket_pci_ids); +/* 
Resets the speaker controller on RocketModem II and III devices */ +static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model) +{ + ByteIO_t addr; + + /* RocketModem II speaker control is at the 8th port location of offset 0x40 */ + if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) { + addr = CtlP->AiopIO[0] + 0x4F; + sOutB(addr, 0); + } + + /* RocketModem III speaker control is at the 1st port location of offset 0x80 */ + if ((model == MODEL_UPCI_RM3_8PORT) + || (model == MODEL_UPCI_RM3_4PORT)) { + addr = CtlP->AiopIO[0] + 0x88; + sOutB(addr, 0); + } +} + +/*************************************************************************** +Function: sPCIInitController +Purpose: Initialization of controller global registers and controller + structure. +Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize, + IRQNum,Frequency,PeriodicOnly) + CONTROLLER_T *CtlP; Ptr to controller structure + int CtlNum; Controller number + ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. + This list must be in the order the AIOPs will be found on the + controller. Once an AIOP in the list is not found, it is + assumed that there are no more AIOPs on the controller. + int AiopIOListSize; Number of addresses in AiopIOList + int IRQNum; Interrupt Request number. Can be any of the following: + 0: Disable global interrupts + 3: IRQ 3 + 4: IRQ 4 + 5: IRQ 5 + 9: IRQ 9 + 10: IRQ 10 + 11: IRQ 11 + 12: IRQ 12 + 15: IRQ 15 + Byte_t Frequency: A flag identifying the frequency + of the periodic interrupt, can be any one of the following: + FREQ_DIS - periodic interrupt disabled + FREQ_137HZ - 137 Hertz + FREQ_69HZ - 69 Hertz + FREQ_34HZ - 34 Hertz + FREQ_17HZ - 17 Hertz + FREQ_9HZ - 9 Hertz + FREQ_4HZ - 4 Hertz + If IRQNum is set to 0 the Frequency parameter is + overidden, it is forced to a value of FREQ_DIS. + int PeriodicOnly: 1 if all interrupts except the periodic + interrupt are to be blocked. + 0 is both the periodic interrupt and + other channel interrupts are allowed. + If IRQNum is set to 0 the PeriodicOnly parameter is + overidden, it is forced to a value of 0. +Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller + initialization failed. + +Comments: + If periodic interrupts are to be disabled but AIOP interrupts + are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. + + If interrupts are to be completely disabled set IRQNum to 0. + + Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an + invalid combination. + + This function performs initialization of global interrupt modes, + but it does not actually enable global interrupts. To enable + and disable global interrupts use functions sEnGlobalInt() and + sDisGlobalInt(). Enabling of global interrupts is normally not + done until all other initializations are complete. + + Even if interrupts are globally enabled, they must also be + individually enabled for each channel that is to generate + interrupts. + +Warnings: No range checking on any of the parameters is done. + + No context switches are allowed while executing this function. + + After this function all AIOPs on the controller are disabled, + they can be enabled with sEnAiop(). 
+*/ +static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, + ByteIO_t * AiopIOList, int AiopIOListSize, + WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, + int PeriodicOnly, int altChanRingIndicator, + int UPCIRingInd) +{ + int i; + ByteIO_t io; + + CtlP->AltChanRingIndicator = altChanRingIndicator; + CtlP->UPCIRingInd = UPCIRingInd; + CtlP->CtlNum = CtlNum; + CtlP->CtlID = CTLID_0001; /* controller release 1 */ + CtlP->BusType = isPCI; /* controller release 1 */ + + if (ConfigIO) { + CtlP->isUPCI = 1; + CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL; + CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL; + CtlP->AiopIntrBits = upci_aiop_intr_bits; + } else { + CtlP->isUPCI = 0; + CtlP->PCIIO = + (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC); + CtlP->AiopIntrBits = aiop_intr_bits; + } + + sPCIControllerEOI(CtlP); /* clear EOI if warm init */ + /* Init AIOPs */ + CtlP->NumAiop = 0; + for (i = 0; i < AiopIOListSize; i++) { + io = AiopIOList[i]; + CtlP->AiopIO[i] = (WordIO_t) io; + CtlP->AiopIntChanIO[i] = io + _INT_CHAN; + + CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ + if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ + break; /* done looking for AIOPs */ + + CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ + sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ + sOutB(io + _INDX_DATA, sClockPrescale); + CtlP->NumAiop++; /* bump count of AIOPs */ + } + + if (CtlP->NumAiop == 0) + return (-1); + else + return (CtlP->NumAiop); +} + /* * Called when a PCI card is found. Retrieves and stores model information, * init's aiopic and serial port hardware. @@ -2519,147 +2654,6 @@ static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, return (CtlP->NumAiop); } -#ifdef CONFIG_PCI -/*************************************************************************** -Function: sPCIInitController -Purpose: Initialization of controller global registers and controller - structure. -Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize, - IRQNum,Frequency,PeriodicOnly) - CONTROLLER_T *CtlP; Ptr to controller structure - int CtlNum; Controller number - ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. - This list must be in the order the AIOPs will be found on the - controller. Once an AIOP in the list is not found, it is - assumed that there are no more AIOPs on the controller. - int AiopIOListSize; Number of addresses in AiopIOList - int IRQNum; Interrupt Request number. Can be any of the following: - 0: Disable global interrupts - 3: IRQ 3 - 4: IRQ 4 - 5: IRQ 5 - 9: IRQ 9 - 10: IRQ 10 - 11: IRQ 11 - 12: IRQ 12 - 15: IRQ 15 - Byte_t Frequency: A flag identifying the frequency - of the periodic interrupt, can be any one of the following: - FREQ_DIS - periodic interrupt disabled - FREQ_137HZ - 137 Hertz - FREQ_69HZ - 69 Hertz - FREQ_34HZ - 34 Hertz - FREQ_17HZ - 17 Hertz - FREQ_9HZ - 9 Hertz - FREQ_4HZ - 4 Hertz - If IRQNum is set to 0 the Frequency parameter is - overidden, it is forced to a value of FREQ_DIS. - int PeriodicOnly: 1 if all interrupts except the periodic - interrupt are to be blocked. - 0 is both the periodic interrupt and - other channel interrupts are allowed. - If IRQNum is set to 0 the PeriodicOnly parameter is - overidden, it is forced to a value of 0. -Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller - initialization failed. 
- -Comments: - If periodic interrupts are to be disabled but AIOP interrupts - are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. - - If interrupts are to be completely disabled set IRQNum to 0. - - Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an - invalid combination. - - This function performs initialization of global interrupt modes, - but it does not actually enable global interrupts. To enable - and disable global interrupts use functions sEnGlobalInt() and - sDisGlobalInt(). Enabling of global interrupts is normally not - done until all other initializations are complete. - - Even if interrupts are globally enabled, they must also be - individually enabled for each channel that is to generate - interrupts. - -Warnings: No range checking on any of the parameters is done. - - No context switches are allowed while executing this function. - - After this function all AIOPs on the controller are disabled, - they can be enabled with sEnAiop(). -*/ -static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, - ByteIO_t * AiopIOList, int AiopIOListSize, - WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, - int PeriodicOnly, int altChanRingIndicator, - int UPCIRingInd) -{ - int i; - ByteIO_t io; - - CtlP->AltChanRingIndicator = altChanRingIndicator; - CtlP->UPCIRingInd = UPCIRingInd; - CtlP->CtlNum = CtlNum; - CtlP->CtlID = CTLID_0001; /* controller release 1 */ - CtlP->BusType = isPCI; /* controller release 1 */ - - if (ConfigIO) { - CtlP->isUPCI = 1; - CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL; - CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL; - CtlP->AiopIntrBits = upci_aiop_intr_bits; - } else { - CtlP->isUPCI = 0; - CtlP->PCIIO = - (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC); - CtlP->AiopIntrBits = aiop_intr_bits; - } - - sPCIControllerEOI(CtlP); /* clear EOI if warm init */ - /* Init AIOPs */ - CtlP->NumAiop = 0; - for (i = 0; i < AiopIOListSize; i++) { - io = AiopIOList[i]; - CtlP->AiopIO[i] = (WordIO_t) io; - CtlP->AiopIntChanIO[i] = io + _INT_CHAN; - - CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ - if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ - break; /* done looking for AIOPs */ - - CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ - sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ - sOutB(io + _INDX_DATA, sClockPrescale); - CtlP->NumAiop++; /* bump count of AIOPs */ - } - - if (CtlP->NumAiop == 0) - return (-1); - else - return (CtlP->NumAiop); -} - -/* Resets the speaker controller on RocketModem II and III devices */ -static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model) -{ - ByteIO_t addr; - - /* RocketModem II speaker control is at the 8th port location of offset 0x40 */ - if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) { - addr = CtlP->AiopIO[0] + 0x4F; - sOutB(addr, 0); - } - - /* RocketModem III speaker control is at the 1st port location of offset 0x80 */ - if ((model == MODEL_UPCI_RM3_8PORT) - || (model == MODEL_UPCI_RM3_4PORT)) { - addr = CtlP->AiopIO[0] + 0x88; - sOutB(addr, 0); - } -} -#endif - /*************************************************************************** Function: sReadAiopID Purpose: Read the AIOP idenfication number directly from an AIOP. 
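The rocket.c hunks above are a pure reshuffle: sPCIInitController() and rmSpeakerReset() are now defined ahead of their first caller in the (presumably already PCI-only) probe path, and the upci_aiop_intr_bits table gains an #ifdef CONFIG_PCI guard, so the forward declarations can be dropped and !CONFIG_PCI builds no longer carry unused statics. A schematic of the resulting layout, with invented identifiers:

#ifdef CONFIG_PCI
/* PCI-only data and helpers, defined before first use: no prototypes needed. */
static const unsigned int example_pci_intr_bits[4] = { 0x1, 0x2, 0x4, 0x8 };

static unsigned int example_pci_init(int aiop)
{
	return example_pci_intr_bits[aiop & 3];
}

/* ...PCI probe code that calls example_pci_init() follows here... */
#endif	/* CONFIG_PCI */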
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 46528d57be72..86c00b1c5583 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -2755,7 +2755,7 @@ static void __init serial8250_isa_init_ports(void) if (nr_uarts > UART_NR) nr_uarts = UART_NR; - for (i = 0; i < UART_NR; i++) { + for (i = 0; i < nr_uarts; i++) { struct uart_8250_port *up = &serial8250_ports[i]; struct uart_port *port = &up->port; @@ -2916,7 +2916,7 @@ static int __init serial8250_console_setup(struct console *co, char *options) * if so, search for the first available port that does have * console support. */ - if (co->index >= UART_NR) + if (co->index >= nr_uarts) co->index = 0; port = &serial8250_ports[co->index].port; if (!port->iobase && !port->membase) @@ -2957,7 +2957,7 @@ int serial8250_find_port(struct uart_port *p) int line; struct uart_port *port; - for (line = 0; line < UART_NR; line++) { + for (line = 0; line < nr_uarts; line++) { port = &serial8250_ports[line].port; if (uart_match_port(p, port)) return line; @@ -3110,7 +3110,7 @@ static int serial8250_remove(struct platform_device *dev) { int i; - for (i = 0; i < UART_NR; i++) { + for (i = 0; i < nr_uarts; i++) { struct uart_8250_port *up = &serial8250_ports[i]; if (up->port.dev == &dev->dev) @@ -3178,7 +3178,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * /* * First, find a port entry which matches. */ - for (i = 0; i < UART_NR; i++) + for (i = 0; i < nr_uarts; i++) if (uart_match_port(&serial8250_ports[i].port, port)) return &serial8250_ports[i]; @@ -3187,7 +3187,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * * free entry. We look for one which hasn't been previously * used (indicated by zero iobase). */ - for (i = 0; i < UART_NR; i++) + for (i = 0; i < nr_uarts; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN && serial8250_ports[i].port.iobase == 0) return &serial8250_ports[i]; @@ -3196,7 +3196,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * * That also failed. Last resort is to find any entry which * doesn't have a real port associated with it. 
*/ - for (i = 0; i < UART_NR; i++) + for (i = 0; i < nr_uarts; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN) return &serial8250_ports[i]; diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index beaa283f5cc6..d07b6af3a937 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -338,7 +338,8 @@ static int dw8250_runtime_suspend(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); - clk_disable_unprepare(data->clk); + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); return 0; } @@ -347,7 +348,8 @@ static int dw8250_runtime_resume(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); - clk_prepare_enable(data->clk); + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); return 0; } @@ -367,6 +369,7 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match); static const struct acpi_device_id dw8250_acpi_match[] = { { "INT33C4", 0 }, { "INT33C5", 0 }, + { "80860F0A", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8ab70a620919..e2774f9ecd59 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -332,7 +332,7 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port * dmaengine_slave_config(chan, &rx_conf); uap->dmarx.chan = chan; - if (plat->dma_rx_poll_enable) { + if (plat && plat->dma_rx_poll_enable) { /* Set poll rate if specified. */ if (plat->dma_rx_poll_rate) { uap->dmarx.auto_poll_rate = false; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 147c9e193595..8cdfbd365892 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -761,6 +761,8 @@ static int imx_startup(struct uart_port *port) temp = readl(sport->port.membase + UCR2); temp |= (UCR2_RXEN | UCR2_TXEN); + if (!sport->have_rtscts) + temp |= UCR2_IRTS; writel(temp, sport->port.membase + UCR2); if (USE_IRDA(sport)) { diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c index e956377a38fe..65be0c00c4bf 100644 --- a/drivers/tty/serial/mcf.c +++ b/drivers/tty/serial/mcf.c @@ -707,8 +707,10 @@ static int __init mcf_init(void) if (rc) return rc; rc = platform_driver_register(&mcf_platform_driver); - if (rc) + if (rc) { + uart_unregister_driver(&mcf_driver); return rc; + } return 0; } diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c index 018bad922554..f51b280f3bf2 100644 --- a/drivers/tty/serial/mpc52xx_uart.c +++ b/drivers/tty/serial/mpc52xx_uart.c @@ -1497,18 +1497,23 @@ mpc52xx_uart_init(void) if (psc_ops && psc_ops->fifoc_init) { ret = psc_ops->fifoc_init(); if (ret) - return ret; + goto err_init; } ret = platform_driver_register(&mpc52xx_uart_of_driver); if (ret) { printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", __FILE__, ret); - uart_unregister_driver(&mpc52xx_uart_driver); - return ret; + goto err_reg; } return 0; +err_reg: + if (psc_ops && psc_ops->fifoc_uninit) + psc_ops->fifoc_uninit(); +err_init: + uart_unregister_driver(&mpc52xx_uart_driver); + return ret; } static void __exit diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c index 77287c54f331..549c70a2a63e 100644 --- a/drivers/tty/serial/nwpserial.c +++ b/drivers/tty/serial/nwpserial.c @@ -199,7 +199,7 @@ static void nwpserial_shutdown(struct uart_port *port) dcr_write(up->dcr_host, UART_IER, up->ier); /* free irq */ - free_irq(up->port.irq, port); + free_irq(up->port.irq, up); } static int 
nwpserial_verify_port(struct uart_port *port, diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 30d4f7a783cd..f0b9f6b52b32 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -202,26 +202,6 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up) return pdata->get_context_loss_count(up->dev); } -static void serial_omap_set_forceidle(struct uart_omap_port *up) -{ - struct omap_uart_port_info *pdata = up->dev->platform_data; - - if (!pdata || !pdata->set_forceidle) - return; - - pdata->set_forceidle(up->dev); -} - -static void serial_omap_set_noidle(struct uart_omap_port *up) -{ - struct omap_uart_port_info *pdata = up->dev->platform_data; - - if (!pdata || !pdata->set_noidle) - return; - - pdata->set_noidle(up->dev); -} - static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) { struct omap_uart_port_info *pdata = up->dev->platform_data; @@ -298,8 +278,6 @@ static void serial_omap_stop_tx(struct uart_port *port) serial_out(up, UART_IER, up->ier); } - serial_omap_set_forceidle(up); - pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); } @@ -364,7 +342,6 @@ static void serial_omap_start_tx(struct uart_port *port) pm_runtime_get_sync(up->dev); serial_omap_enable_ier_thri(up); - serial_omap_set_noidle(up); pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); } diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 074b9194144f..0c8a9fa2be6c 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1166,6 +1166,18 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, ourport->tx_irq = ret; ourport->clk = clk_get(&platdev->dev, "uart"); + if (IS_ERR(ourport->clk)) { + pr_err("%s: Controller clock not found\n", + dev_name(&platdev->dev)); + return PTR_ERR(ourport->clk); + } + + ret = clk_prepare_enable(ourport->clk); + if (ret) { + pr_err("uart: clock failed to prepare+enable: %d\n", ret); + clk_put(ourport->clk); + return ret; + } /* Keep all interrupts masked and cleared */ if (s3c24xx_serial_has_interrupt_mask(port)) { @@ -1180,6 +1192,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, /* reset the fifos (and setup the uart) */ s3c24xx_serial_resetport(port, cfg); + clk_disable_unprepare(ourport->clk); return 0; } @@ -1803,6 +1816,7 @@ static int __init s3c24xx_serial_modinit(void) static void __exit s3c24xx_serial_modexit(void) { + platform_driver_unregister(&samsung_serial_driver); uart_unregister_driver(&s3c24xx_uart_drv); } diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c index 6953dc82850c..a4fdce74f883 100644 --- a/drivers/tty/tty_audit.c +++ b/drivers/tty/tty_audit.c @@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf) tty_audit_buf_free(buf); } -static void tty_audit_log(const char *description, struct task_struct *tsk, - kuid_t loginuid, unsigned sessionid, int major, - int minor, unsigned char *data, size_t size) +static void tty_audit_log(const char *description, int major, int minor, + unsigned char *data, size_t size) { struct audit_buffer *ab; + struct task_struct *tsk = current; + uid_t uid = from_kuid(&init_user_ns, task_uid(tsk)); + uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk)); + u32 sessionid = audit_get_sessionid(tsk); ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); if (ab) { char name[sizeof(tsk->comm)]; - kuid_t uid = task_uid(tsk); - - audit_log_format(ab, "%s pid=%u 
uid=%u auid=%u ses=%u " - "major=%d minor=%d comm=", description, - tsk->pid, - from_kuid(&init_user_ns, uid), - from_kuid(&init_user_ns, loginuid), - sessionid, - major, minor); + + audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d" + " minor=%d comm=", description, tsk->pid, uid, + loginuid, sessionid, major, minor); get_task_comm(name, tsk); audit_log_untrustedstring(ab, name); audit_log_format(ab, " data="); @@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk, * tty_audit_buf_push - Push buffered data out * * Generate an audit message from the contents of @buf, which is owned by - * @tsk with @loginuid. @buf->mutex must be locked. + * the current task. @buf->mutex must be locked. */ -static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, - unsigned int sessionid, - struct tty_audit_buf *buf) +static void tty_audit_buf_push(struct tty_audit_buf *buf) { if (buf->valid == 0) return; @@ -102,25 +98,11 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, buf->valid = 0; return; } - tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, - buf->data, buf->valid); + tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid); buf->valid = 0; } /** - * tty_audit_buf_push_current - Push buffered data out - * - * Generate an audit message from the contents of @buf, which is owned by - * the current task. @buf->mutex must be locked. - */ -static void tty_audit_buf_push_current(struct tty_audit_buf *buf) -{ - kuid_t auid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - tty_audit_buf_push(current, auid, sessionid, buf); -} - -/** * tty_audit_exit - Handle a task exit * * Make sure all buffered data is written out and deallocate the buffer. 
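tty_audit_log() above now takes its ids from current instead of from parameters, converting the kernel-internal kuid_t values with from_kuid() before they are logged. A minimal sketch of that conversion idiom, with an invented helper name:

#include <linux/audit.h>
#include <linux/cred.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

/* Log which task touched a tty line; all ids are derived from current. */
static void example_log_current_ids(int major, int minor)
{
	uid_t uid = from_kuid(&init_user_ns, task_uid(current));
	uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
	u32 sessionid = audit_get_sessionid(current);

	pr_info("tty %d:%d uid=%u auid=%u ses=%u\n",
		major, minor, uid, loginuid, sessionid);
}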
@@ -130,15 +112,13 @@ void tty_audit_exit(void) { struct tty_audit_buf *buf; - spin_lock_irq(&current->sighand->siglock); buf = current->signal->tty_audit_buf; current->signal->tty_audit_buf = NULL; - spin_unlock_irq(&current->sighand->siglock); if (!buf) return; mutex_lock(&buf->mutex); - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -151,9 +131,8 @@ void tty_audit_exit(void) */ void tty_audit_fork(struct signal_struct *sig) { - spin_lock_irq(&current->sighand->siglock); sig->audit_tty = current->signal->audit_tty; - spin_unlock_irq(&current->sighand->siglock); + sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd; } /** @@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) { struct tty_audit_buf *buf; int major, minor, should_audit; + unsigned long flags; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); should_audit = current->signal->audit_tty; buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); major = tty->driver->major; minor = tty->driver->minor_start + tty->index; if (buf) { mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } @@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); - tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major, - minor, &ch, 1); + tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1); } } /** - * tty_audit_push_task - Flush task's pending audit data - * @tsk: task pointer - * @loginuid: sender login uid - * @sessionid: sender session id + * tty_audit_push_current - Flush current's pending audit data * - * Called with a ref on @tsk held. Try to lock sighand and get a - * reference to the tty audit buffer if available. + * Try to lock sighand and get a reference to the tty audit buffer if available. * Flush the buffer or return an appropriate error code.
*/ -int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) +int tty_audit_push_current(void) { struct tty_audit_buf *buf = ERR_PTR(-EPERM); + struct task_struct *tsk = current; unsigned long flags; if (!lock_task_sighand(tsk, &flags)) @@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) return PTR_ERR(buf); mutex_lock(&buf->mutex); - tty_audit_buf_push(tsk, loginuid, sessionid, buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, unsigned icanon) { struct tty_audit_buf *buf, *buf2; + unsigned long flags; buf = NULL; buf2 = NULL; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) goto out; buf = current->signal->tty_audit_buf; @@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, atomic_inc(&buf->count); goto out; } - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); buf2 = tty_audit_buf_alloc(tty->driver->major, tty->driver->minor_start + tty->index, @@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, return NULL; } - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (!current->signal->audit_tty) goto out; buf = current->signal->tty_audit_buf; @@ -276,7 +253,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, atomic_inc(&buf->count); /* Fall through */ out: - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); if (buf2) tty_audit_buf_free(buf2); return buf; @@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, { struct tty_audit_buf *buf; int major, minor; + int audit_log_tty_passwd; + unsigned long flags; if (unlikely(size == 0)) return; + spin_lock_irqsave(&current->sighand->siglock, flags); + audit_log_tty_passwd = current->signal->audit_tty_log_passwd; + spin_unlock_irqrestore(&current->sighand->siglock, flags); + if (!audit_log_tty_passwd && icanon && !L_ECHO(tty)) + return; + if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) return; @@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, minor = tty->driver->minor_start + tty->index; if (buf->major != major || buf->minor != minor || buf->icanon != icanon) { - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); buf->major = major; buf->minor = minor; buf->icanon = icanon; @@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, data += run; size -= run; if (buf->valid == N_TTY_BUF_SIZE) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); } while (size != 0); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -339,16 +324,17 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, void tty_audit_push(struct tty_struct *tty) { struct tty_audit_buf *buf; + unsigned long flags; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) { - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); return; } buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); if
(buf) { int major, minor; @@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty) minor = tty->driver->minor_start + tty->index; mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index fbd447b390f7..740202d8a5c4 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -779,7 +779,6 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ con_set_default_unimap(vc); vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) { - tty_port_destroy(&vc->port); kfree(vc); vc_cons[currcons].d = NULL; return -ENOMEM; @@ -986,26 +985,25 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws) return ret; } -void vc_deallocate(unsigned int currcons) +struct vc_data *vc_deallocate(unsigned int currcons) { + struct vc_data *vc = NULL; + WARN_CONSOLE_UNLOCKED(); if (vc_cons_allocated(currcons)) { - struct vc_data *vc = vc_cons[currcons].d; - struct vt_notifier_param param = { .vc = vc }; + struct vt_notifier_param param; + param.vc = vc = vc_cons[currcons].d; atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param); vcs_remove_sysfs(currcons); vc->vc_sw->con_deinit(vc); put_pid(vc->vt_pid); module_put(vc->vc_sw->owner); kfree(vc->vc_screenbuf); - if (currcons >= MIN_NR_CONSOLES) { - tty_port_destroy(&vc->port); - kfree(vc); - } vc_cons[currcons].d = NULL; } + return vc; } /* diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 98ff1735eafc..fc2c06c66e89 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -283,6 +283,51 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_ return 0; } +/* deallocate a single console, if possible (leave 0) */ +static int vt_disallocate(unsigned int vc_num) +{ + struct vc_data *vc = NULL; + int ret = 0; + + if (!vc_num) + return 0; + + console_lock(); + if (VT_BUSY(vc_num)) + ret = -EBUSY; + else + vc = vc_deallocate(vc_num); + console_unlock(); + + if (vc && vc_num >= MIN_NR_CONSOLES) { + tty_port_destroy(&vc->port); + kfree(vc); + } + + return ret; +} + +/* deallocate all unused consoles, but leave 0 */ +static void vt_disallocate_all(void) +{ + struct vc_data *vc[MAX_NR_CONSOLES]; + int i; + + console_lock(); + for (i = 1; i < MAX_NR_CONSOLES; i++) + if (!VT_BUSY(i)) + vc[i] = vc_deallocate(i); + else + vc[i] = NULL; + console_unlock(); + + for (i = 1; i < MAX_NR_CONSOLES; i++) { + if (vc[i] && i >= MIN_NR_CONSOLES) { + tty_port_destroy(&vc[i]->port); + kfree(vc[i]); + } + } +} /* @@ -769,24 +814,10 @@ int vt_ioctl(struct tty_struct *tty, ret = -ENXIO; break; } - if (arg == 0) { - /* deallocate all unused consoles, but leave 0 */ - console_lock(); - for (i=1; i<MAX_NR_CONSOLES; i++) - if (!
VT_BUSY(i)) - vc_deallocate(i); - console_unlock(); - } else { - /* deallocate a single console, if possible */ - arg--; - if (VT_BUSY(arg)) - ret = -EBUSY; - else if (arg) { /* leave 0 */ - console_lock(); - vc_deallocate(arg); - console_unlock(); - } - } + if (arg == 0) + vt_disallocate_all(); + else + ret = vt_disallocate(--arg); break; case VT_RESIZE: diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig index e92eeaf251fe..5295be0342c1 100644 --- a/drivers/uio/Kconfig +++ b/drivers/uio/Kconfig @@ -45,6 +45,7 @@ config UIO_PDRV_GENIRQ config UIO_DMEM_GENIRQ tristate "Userspace platform driver with generic irq and dynamic memory" + depends on HAS_DMA help Platform driver for Userspace I/O devices, including generic interrupt handling code. Shared interrupts are not supported. diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index b7eb86ad6bf2..8a7eb77233b4 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ { int ret, len; __le32 *buf; - int offb, offd; + int offb; + unsigned int offd; const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; int buflen = ((size - 1) / stride + 1 + size * 2) * 4; diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index 608a2aeb400c..b2df442eb3e5 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig @@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC config USB_CHIPIDEA_HOST bool "ChipIdea host controller" depends on USB=y || USB=USB_CHIPIDEA - depends on USB_EHCI_HCD + depends on USB_EHCI_HCD=y select USB_EHCI_ROOT_HUB_TT help Say Y here to enable host controller functionality of the diff --git a/drivers/usb/chipidea/ci13xxx_imx.c b/drivers/usb/chipidea/ci13xxx_imx.c index 8faec9dbbb84..73f9d5f15adb 100644 --- a/drivers/usb/chipidea/ci13xxx_imx.c +++ b/drivers/usb/chipidea/ci13xxx_imx.c @@ -173,17 +173,10 @@ static int ci13xxx_imx_probe(struct platform_device *pdev) ci13xxx_imx_platdata.phy = data->phy; - if (!pdev->dev.dma_mask) { - pdev->dev.dma_mask = devm_kzalloc(&pdev->dev, - sizeof(*pdev->dev.dma_mask), GFP_KERNEL); - if (!pdev->dev.dma_mask) { - ret = -ENOMEM; - dev_err(&pdev->dev, "Failed to alloc dma_mask!\n"); - goto err; - } - *pdev->dev.dma_mask = DMA_BIT_MASK(32); - dma_set_coherent_mask(&pdev->dev, *pdev->dev.dma_mask); - } + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (usbmisc_ops && usbmisc_ops->init) { ret = usbmisc_ops->init(&pdev->dev); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 450107e5f657..49b098bedf9b 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -370,11 +370,6 @@ static int ci_hdrc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing resource\n"); - return -ENODEV; - } - base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 8772b3659296..db535b0aa172 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -51,7 +51,7 @@ config USB_DYNAMIC_MINORS config USB_OTG bool "OTG support" - depends on USB_SUSPEND + depends on PM_RUNTIME default n help The most notable feature of USB OTG is support for a diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index caefc800f298..c88c4fb9459d 100644 
--- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1287,9 +1287,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, goto error; } for (totlen = u = 0; u < uurb->number_of_packets; u++) { - /* arbitrary limit, - * sufficient for USB 2.0 high-bandwidth iso */ - if (isopkt[u].length > 8192) { + /* + * arbitrary limit need for USB 3.0 + * bMaxBurst (0~15 allowed, 1~16 packets) + * bmAttributes (bit 1:0, mult 0~2, 1~3 packets) + * sizemax: 1024 * 16 * 3 = 49152 + */ + if (isopkt[u].length > 49152) { ret = -EINVAL; goto error; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index ab5638d9c707..a63598895077 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Edirol SD-20 */ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Alcor Micro Corp. Hub */ + { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, + /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index ea5ee9c21c35..757aa18027d0 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -19,21 +19,21 @@ choice config USB_DWC3_HOST bool "Host only mode" - depends on USB + depends on USB=y || USB=USB_DWC3 help Select this when you want to use DWC3 in host mode only, thereby the gadget feature will be regressed. config USB_DWC3_GADGET bool "Gadget only mode" - depends on USB_GADGET + depends on USB_GADGET=y || USB_GADGET=USB_DWC3 help Select this when you want to use DWC3 in gadget mode only, thereby the host feature will be regressed. config USB_DWC3_DUAL_ROLE bool "Dual Role mode" - depends on (USB && USB_GADGET) + depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3)) help This is the default mode of working of DWC3 controller where both host and gadget features are enabled. diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index a8afe6e26621..8ce9d7fd6cfc 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -95,8 +95,6 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused) return 0; } -static u64 dwc3_exynos_dma_mask = DMA_BIT_MASK(32); - static int dwc3_exynos_probe(struct platform_device *pdev) { struct dwc3_exynos *exynos; @@ -118,7 +116,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. 
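The devio.c change above raises the per-packet isochronous length check from the USB 2.0 high-bandwidth value (8192) to the USB 3.0 worst case: 1024-byte packets, bMaxBurst allowing up to 16 packets per burst, and a mult field allowing up to 3 bursts per interval. A small sketch of the same bound, with invented names:

#include <linux/errno.h>

/* Worst-case bytes per service interval for a USB 3.0 isochronous endpoint. */
#define EX_ISOC_MAX_PACKET	1024	/* wMaxPacketSize */
#define EX_ISOC_MAX_BURST	16	/* bMaxBurst 0..15 -> 1..16 packets */
#define EX_ISOC_MAX_MULT	3	/* bmAttributes mult 0..2 -> 1..3 bursts */
#define EX_ISOC_MAX_BYTES	(EX_ISOC_MAX_PACKET * EX_ISOC_MAX_BURST * \
				 EX_ISOC_MAX_MULT)	/* 49152 */

static int example_check_isoc_len(unsigned int len)
{
	return len <= EX_ISOC_MAX_BYTES ? 0 : -EINVAL;
}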
*/ if (!dev->dma_mask) - dev->dma_mask = &dwc3_exynos_dma_mask; + dev->dma_mask = &dev->coherent_dma_mask; + if (!dev->coherent_dma_mask) + dev->coherent_dma_mask = DMA_BIT_MASK(32); platform_set_drvdata(pdev, exynos); @@ -164,9 +164,9 @@ static int dwc3_exynos_remove(struct platform_device *pdev) { struct dwc3_exynos *exynos = platform_get_drvdata(pdev); + device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); platform_device_unregister(exynos->usb2_phy); platform_device_unregister(exynos->usb3_phy); - device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); clk_disable_unprepare(exynos->clk); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 227d4a7acad7..eba9e2baf32b 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -196,9 +196,9 @@ static void dwc3_pci_remove(struct pci_dev *pci) { struct dwc3_pci *glue = pci_get_drvdata(pci); + platform_device_unregister(glue->dwc3); platform_device_unregister(glue->usb2_phy); platform_device_unregister(glue->usb3_phy); - platform_device_unregister(glue->dwc3); pci_set_drvdata(pci, NULL); pci_disable_device(pci); } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 2b6e7e001207..b5e5b35df49c 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1706,11 +1706,19 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) dep = dwc->eps[epnum]; if (!dep) continue; - - dwc3_free_trb_pool(dep); - - if (epnum != 0 && epnum != 1) + /* + * Physical endpoints 0 and 1 are special; they form the + * bi-directional USB endpoint 0. + * + * For those two physical endpoints, we don't allocate a TRB + * pool nor do we add them the endpoints list. Due to that, we + * shouldn't do these two operations otherwise we would end up + * with all sorts of bugs when removing dwc3.ko. + */ + if (epnum != 0 && epnum != 1) { + dwc3_free_trb_pool(dep); list_del(&dep->endpoint.ep_list); + } kfree(dep); } diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 83300d94a893..f41aa0d0c414 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -146,7 +146,6 @@ config USB_LPC32XX depends on ARCH_LPC32XX depends on USB_PHY select USB_ISP1301 - select USB_OTG_UTILS help This option selects the USB device controller in the LPC32xx SoC. 
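The DMA-mask changes in ci13xxx_imx.c and dwc3-exynos.c above (and repeated below in the EHCI, OHCI and UHCI platform glue) all apply the same idiom: drop the driver-private static u64 mask and instead point dev->dma_mask at the device's own coherent_dma_mask, defaulting both to 32 bits until proper DMA capability bindings exist. A minimal sketch of that idiom in an otherwise empty platform probe; foo_probe is an illustrative name, not a function from these patches:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * The shared USB core dereferences dev->dma_mask, so make sure it
	 * points somewhere valid; reuse the coherent mask rather than a
	 * driver-local static, and default both masks to 32 bits.
	 */
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	return 0;
}

Because the streaming mask now shares storage with the coherent mask in this fallback, a later update to the coherent mask is seen through both, which a per-driver static u64 could not guarantee.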
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index f2a970f75bfa..5a5128a226f7 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c @@ -1992,8 +1992,6 @@ err_map_regs: err_get_hclk: clk_put(pclk); - platform_set_drvdata(pdev, NULL); - return ret; } diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c index 6e6518264c42..fd24cb4540a4 100644 --- a/drivers/usb/gadget/bcm63xx_udc.c +++ b/drivers/usb/gadget/bcm63xx_udc.c @@ -2334,21 +2334,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "error finding USBD resource\n"); - return -ENXIO; - } - udc->usbd_regs = devm_ioremap_resource(dev, res); if (IS_ERR(udc->usbd_regs)) return PTR_ERR(udc->usbd_regs); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(dev, "error finding IUDMA resource\n"); - return -ENXIO; - } - udc->iudma_regs = devm_ioremap_resource(dev, res); if (IS_ERR(udc->iudma_regs)) return PTR_ERR(udc->iudma_regs); @@ -2420,7 +2410,6 @@ static int bcm63xx_udc_remove(struct platform_device *pdev) usb_del_gadget_udc(&udc->gadget); BUG_ON(udc->driver); - platform_set_drvdata(pdev, NULL); bcm63xx_uninit_udc_hw(udc); return 0; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 3d5cfc9c2c78..80e7f75a56c7 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -821,8 +821,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget, gi->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, gi->gstrings, USB_GADGET_FIRST_AVAIL_IDX); - if (IS_ERR(s)) + if (IS_ERR(s)) { + ret = PTR_ERR(s); goto err_comp_cleanup; + } gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id; gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id; @@ -847,8 +849,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget, } cfg->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1); - if (IS_ERR(s)) + if (IS_ERR(s)) { + ret = PTR_ERR(s); goto err_comp_cleanup; + } c->iConfiguration = s[0].id; } diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index a792e322f4f1..c588e8e486e5 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -1001,7 +1001,6 @@ static int dummy_udc_remove(struct platform_device *pdev) struct dummy *dum = platform_get_drvdata(pdev); usb_del_gadget_udc(&dum->gadget); - platform_set_drvdata(pdev, NULL); device_remove_file(&dum->gadget.dev, &dev_attr_function); return 0; } @@ -2661,8 +2660,10 @@ static int __init init(void) } for (i = 0; i < mod_data.num; i++) { dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL); - if (!dum[i]) + if (!dum[i]) { + retval = -ENOMEM; goto err_add_pdata; + } retval = platform_device_add_data(the_hcd_pdev[i], &dum[i], sizeof(void *)); if (retval) diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c index d893d6929079..abf8a31ae146 100644 --- a/drivers/usb/gadget/f_ecm.c +++ b/drivers/usb/gadget/f_ecm.c @@ -816,6 +816,7 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f) * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded + * @dev: eth_dev structure * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. 
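The configfs.c and dummy_hcd.c hunks above (and the fusb300, m66592, r8a66597 and zero.c hunks further down) fix the same error-handling slip: a failure branch jumped to the cleanup label without first storing an error code, so the function could return a stale zero after a failed allocation or string attach. A minimal sketch of the corrected shape, using hypothetical foo_attach()/foo_bind() helpers rather than the real gadget functions:

#include <linux/err.h>
#include <linux/slab.h>

/* Stand-in for a helper such as usb_gstrings_attach(): ERR_PTR on failure. */
static void *foo_attach(bool fail)
{
	static int token;

	return fail ? ERR_PTR(-EINVAL) : &token;
}

static int foo_bind(bool fail)
{
	void *s;
	char *buf;
	int ret;

	buf = kzalloc(16, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;		/* set before jumping, as in dummy_hcd.c */
		goto err_out;
	}

	s = foo_attach(fail);
	if (IS_ERR(s)) {
		ret = PTR_ERR(s);	/* propagate the helper's errno, as in configfs.c */
		goto err_free;
	}

	kfree(buf);
	return 0;

err_free:
	kfree(buf);
err_out:
	return ret;
}

Without the explicit assignments, ret keeps whatever an earlier call left in it, which is how the original code paths could report success on failure.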
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c index 185d6f5e4e4d..7be04b342494 100644 --- a/drivers/usb/gadget/f_subset.c +++ b/drivers/usb/gadget/f_subset.c @@ -373,6 +373,7 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f) * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded + * @dev: eth_dev structure * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. diff --git a/drivers/usb/gadget/f_uac2.c b/drivers/usb/gadget/f_uac2.c index c7468b6c07b0..03c1fb686644 100644 --- a/drivers/usb/gadget/f_uac2.c +++ b/drivers/usb/gadget/f_uac2.c @@ -456,8 +456,6 @@ static int snd_uac2_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); - if (card) return snd_card_free(card); diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c index cec8871b77f9..b8632d40f8bf 100644 --- a/drivers/usb/gadget/fusb300_udc.c +++ b/drivers/usb/gadget/fusb300_udc.c @@ -1461,8 +1461,10 @@ static int __init fusb300_probe(struct platform_device *pdev) fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, GFP_KERNEL); - if (fusb300->ep0_req == NULL) + if (fusb300->ep0_req == NULL) { + ret = -ENOMEM; goto clean_up3; + } init_controller(fusb300); ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget); diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c index b5cebd6b0d7a..9b2d24e4c95f 100644 --- a/drivers/usb/gadget/imx_udc.c +++ b/drivers/usb/gadget/imx_udc.c @@ -1511,8 +1511,6 @@ static int __exit imx_udc_remove(struct platform_device *pdev) if (pdata->exit) pdata->exit(&pdev->dev); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 866ef0999247..51cfe72da5bb 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c @@ -1660,8 +1660,10 @@ static int __init m66592_probe(struct platform_device *pdev) m66592->epaddr2ep[0] = &m66592->ep[0]; m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); - if (m66592->ep0_req == NULL) + if (m66592->ep0_req == NULL) { + ret = -ENOMEM; goto clean_up3; + } m66592->ep0_req->complete = nop_completion; init_controller(m66592); diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index ef47495dec8f..95c531d5aa4f 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c @@ -2236,7 +2236,6 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev) dev->transceiver = NULL; } - platform_set_drvdata(pdev, NULL); the_controller = NULL; return 0; } diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 0b742d171843..7ff7d9cf2061 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -1977,8 +1977,10 @@ static int __init r8a66597_probe(struct platform_device *pdev) r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep, GFP_KERNEL); - if (r8a66597->ep0_req == NULL) + if (r8a66597->ep0_req == NULL) { + ret = -ENOMEM; goto clean_up3; + } r8a66597->ep0_req->complete = nop_completion; ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget); diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index a3cdc32115d5..af22f24046b2 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -437,7 
+437,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg, if (hs_req->req.length == 0) return; - usb_gadget_unmap_request(&hsotg->gadget, hs_req, hs_ep->dir_in); + usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); } /** diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index d0e75e1b3ccb..09c4f70c93c4 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c @@ -1851,6 +1851,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev) irq = gpio_to_irq(udc_info->vbus_pin); if (irq < 0) { dev_err(dev, "no irq for gpio vbus pin\n"); + retval = irq; goto err_gpio_claim; } @@ -1948,8 +1949,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev) iounmap(base_addr); release_mem_region(rsrc_start, rsrc_len); - platform_set_drvdata(pdev, NULL); - if (!IS_ERR(udc_clock) && udc_clock != NULL) { clk_disable(udc_clock); clk_put(udc_clock); diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c index 2cd6262e8b71..0deb9d6cde26 100644 --- a/drivers/usb/gadget/zero.c +++ b/drivers/usb/gadget/zero.c @@ -284,12 +284,16 @@ static int __init zero_bind(struct usb_composite_dev *cdev) ss_opts->bulk_buflen = gzero_options.bulk_buflen; func_ss = usb_get_function(func_inst_ss); - if (IS_ERR(func_ss)) + if (IS_ERR(func_ss)) { + status = PTR_ERR(func_ss); goto err_put_func_inst_ss; + } func_inst_lb = usb_get_function_instance("Loopback"); - if (IS_ERR(func_inst_lb)) + if (IS_ERR(func_inst_lb)) { + status = PTR_ERR(func_inst_lb); goto err_put_func_ss; + } lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst); lb_opts->bulk_buflen = gzero_options.bulk_buflen; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index de94f2699063..344d5e2f87d7 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -507,7 +507,7 @@ endif # USB_OHCI_HCD config USB_UHCI_HCD tristate "UHCI HCD (most Intel and VIA) support" - depends on PCI || SPARC_LEON || ARCH_VT8500 + depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC ---help--- The Universal Host Controller Interface is a standard by Intel for accessing the USB hardware in the PC (which is also called the USB @@ -524,26 +524,19 @@ config USB_UHCI_HCD config USB_UHCI_SUPPORT_NON_PCI_HC bool - depends on USB_UHCI_HCD - default y if (SPARC_LEON || ARCH_VT8500) + default y if (SPARC_LEON || USB_UHCI_PLATFORM) config USB_UHCI_PLATFORM - bool "Generic UHCI Platform Driver support" - depends on USB_UHCI_SUPPORT_NON_PCI_HC + bool default y if ARCH_VT8500 - ---help--- - Enable support for generic UHCI platform devices that require no - additional configuration. 
config USB_UHCI_BIG_ENDIAN_MMIO bool - depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON - default y + default y if SPARC_LEON config USB_UHCI_BIG_ENDIAN_DESC bool - depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON - default y + default y if SPARC_LEON config USB_FHCI_HCD tristate "Freescale QE USB Host Controller support" diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 66420097c242..02f4611faa62 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c @@ -63,8 +63,6 @@ static void atmel_stop_ehci(struct platform_device *pdev) /*-------------------------------------------------------------------------*/ -static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32); - static int ehci_atmel_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd; @@ -93,7 +91,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &at91_ehci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 312fc10da3c7..246e124e6ac5 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1286,23 +1286,6 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ehci_hcd_sead3_driver #endif -#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \ - !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \ - !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_OMAP) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_ORION) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_SPEAR) && \ - !IS_ENABLED(CONFIG_USB_EHCI_S5P) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_AT91) && \ - !IS_ENABLED(CONFIG_USB_EHCI_MSM) && \ - !defined(PLATFORM_DRIVER) && \ - !defined(PS3_SYSTEM_BUS_DRIVER) && \ - !defined(OF_PLATFORM_DRIVER) && \ - !defined(XILINX_OF_PLATFORM_DRIVER) -#error "missing bus glue for ehci-hcd" -#endif - static int __init ehci_hcd_init(void) { int retval = 0; diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 3d1491b5f360..16d7150e8557 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -90,8 +90,6 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = { .extra_priv_size = sizeof(struct omap_hcd), }; -static u64 omap_ehci_dma_mask = DMA_BIT_MASK(32); - /** * ehci_hcd_omap_probe - initialize TI-based HCDs * @@ -146,8 +144,10 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. 
*/ - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &omap_ehci_dma_mask; + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + if (!dev->coherent_dma_mask) + dev->coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&ehci_omap_hc_driver, dev, dev_name(dev)); diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 54c579485150..efbc588b48c5 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -137,8 +137,6 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd, } } -static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32); - static int ehci_orion_drv_probe(struct platform_device *pdev) { struct orion_ehci_data *pd = pdev->dev.platform_data; @@ -183,7 +181,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) * now. Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &ehci_orion_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (!request_mem_region(res->start, resource_size(res), ehci_orion_hc_driver.description)) { diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c index 635775278c7f..379037f51a2f 100644 --- a/drivers/usb/host/ehci-s5p.c +++ b/drivers/usb/host/ehci-s5p.c @@ -71,8 +71,6 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev) dev_err(dev, "can't request ehci vbus gpio %d", gpio); } -static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32); - static int s5p_ehci_probe(struct platform_device *pdev) { struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; @@ -90,7 +88,7 @@ static int s5p_ehci_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &ehci_s5p_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -107,6 +105,7 @@ static int s5p_ehci_probe(struct platform_device *pdev) if (IS_ERR(phy)) { /* Fallback to pdata */ if (!pdata) { + usb_put_hcd(hcd); dev_warn(&pdev->dev, "no platform data or transceiver defined\n"); return -EPROBE_DEFER; } else { diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index acff5b8f6e89..f80d0330d548 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) } static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; /* carryover low/fullspeed bandwidth that crosses uframe boundries */ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) @@ -646,6 +646,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) /* reschedule QH iff another request is queued */ if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { rc = qh_schedule(ehci, qh); + if (rc == 0) { + qh_refresh(ehci, qh); + qh_link_periodic(ehci, qh); + } /* An error here likely indicates handshake failure * or no space left in the schedule. 
Neither fault @@ -653,9 +657,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) * * FIXME kill the now-dysfunctional queued urbs */ - if (rc != 0) + else { ehci_err(ehci, "can't reschedule qh %p, err %d\n", qh, rc); + } } /* maybe turn off periodic schedule */ diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c index 61ecfb3d52f5..bd3e5cbc6240 100644 --- a/drivers/usb/host/ehci-spear.c +++ b/drivers/usb/host/ehci-spear.c @@ -58,8 +58,6 @@ static int ehci_spear_drv_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend, ehci_spear_drv_resume); -static u64 spear_ehci_dma_mask = DMA_BIT_MASK(32); - static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd ; @@ -84,7 +82,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &spear_ehci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); usbh_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbh_clk)) { diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index e3eddc31ac83..59d111bf44a9 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -637,8 +637,6 @@ static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable) writel(val, base + TEGRA_USB_PORTSC1); } -static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32); - static int tegra_ehci_probe(struct platform_device *pdev) { struct resource *res; @@ -661,7 +659,9 @@ static int tegra_ehci_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &tegra_ehci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); setup_vbus_gpio(pdev, pdata); diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 125e261f5bfc..2facee53eab1 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf) int retval = 1; unsigned long flags; - /* if !USB_SUSPEND, root hub timers won't get shut down ... */ + /* if !PM_RUNTIME, root hub timers won't get shut down ... 
*/ if (!HC_IS_RUNNING(hcd->state)) return 0; diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index bbb791bd7617..a13709ee4e5d 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c @@ -373,8 +373,10 @@ static int isp1760_plat_probe(struct platform_device *pdev) irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq_res) { pr_warning("isp1760: IRQ resource not available\n"); - return -ENODEV; + ret = -ENODEV; + goto cleanup; } + irqflags |= irq_res->flags & IRQF_TRIGGER_MASK; if (priv) { diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index a0cb44f0e724..2ee1496dbc1d 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -504,8 +504,6 @@ static const struct of_device_id at91_ohci_dt_ids[] = { MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids); -static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32); - static int ohci_at91_of_init(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -522,7 +520,9 @@ static int ohci_at91_of_init(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &at91_ohci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index 07592c00af26..b0b542c14e31 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -98,8 +98,6 @@ static const struct hc_driver exynos_ohci_hc_driver = { .start_port_reset = ohci_start_port_reset, }; -static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32); - static int exynos_ohci_probe(struct platform_device *pdev) { struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data; @@ -117,7 +115,7 @@ static int exynos_ohci_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &ohci_exynos_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 9e6de9586ae4..fc627fd54116 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -233,14 +233,14 @@ static int ohci_urb_enqueue ( urb->start_frame = frame; } } else if (ed->type == PIPE_ISOCHRONOUS) { - u16 next = ohci_frame_no(ohci) + 2; + u16 next = ohci_frame_no(ohci) + 1; u16 frame = ed->last_iso + ed->interval; /* Behind the scheduling threshold? */ if (unlikely(tick_before(frame, next))) { /* USB_ISO_ASAP: Round up to the first available slot */ - if (urb->transfer_flags & URB_ISO_ASAP) + if (urb->transfer_flags & URB_ISO_ASAP) { frame += (next - frame + ed->interval - 1) & -ed->interval; @@ -248,21 +248,25 @@ static int ohci_urb_enqueue ( * Not ASAP: Use the next slot in the stream. If * the entire URB falls before the threshold, fail. */ - else if (tick_before(frame + ed->interval * + } else { + if (tick_before(frame + ed->interval * (urb->number_of_packets - 1), next)) { - retval = -EXDEV; - usb_hcd_unlink_urb_from_ep(hcd, urb); - goto fail; - } + retval = -EXDEV; + usb_hcd_unlink_urb_from_ep(hcd, urb); + goto fail; + } - /* - * Some OHCI hardware doesn't handle late TDs - * correctly. 
After retiring them it proceeds to - * the next ED instead of the next TD. Therefore - * we have to omit the late TDs entirely. - */ - urb_priv->td_cnt = DIV_ROUND_UP(next - frame, - ed->interval); + /* + * Some OHCI hardware doesn't handle late TDs + * correctly. After retiring them it proceeds + * to the next ED instead of the next TD. + * Therefore we have to omit the late TDs + * entirely. + */ + urb_priv->td_cnt = DIV_ROUND_UP( + (u16) (next - frame), + ed->interval); + } } urb->start_frame = frame; } diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index f4988fbe78e7..5d7eb72c5064 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c @@ -223,8 +223,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) isp1301_i2c_client = isp1301_get_client(isp1301_node); if (!isp1301_i2c_client) { - ret = -EPROBE_DEFER; - goto out; + return -EPROBE_DEFER; } pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -234,7 +233,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (usb_disabled()) { dev_err(&pdev->dev, "USB is disabled\n"); ret = -ENODEV; - goto out; + goto fail_disable; } /* Enable AHB slave USB clock, needed for further USB clock control */ @@ -245,19 +244,19 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_pll_clk)) { dev_err(&pdev->dev, "failed to acquire USB PLL\n"); ret = PTR_ERR(usb_pll_clk); - goto out1; + goto fail_pll; } ret = clk_enable(usb_pll_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB PLL\n"); - goto out2; + goto fail_pllen; } ret = clk_set_rate(usb_pll_clk, 48000); if (ret < 0) { dev_err(&pdev->dev, "failed to set USB clock rate\n"); - goto out3; + goto fail_rate; } /* Enable USB device clock */ @@ -265,13 +264,13 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_dev_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_dev_clk); - goto out4; + goto fail_dev; } ret = clk_enable(usb_dev_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); - goto out5; + goto fail_deven; } /* Enable USB otg clocks */ @@ -279,7 +278,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_otg_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_otg_clk); - goto out6; + goto fail_otg; } __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL); @@ -287,7 +286,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) ret = clk_enable(usb_otg_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); - goto out7; + goto fail_otgen; } isp1301_configure(); @@ -296,20 +295,14 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (!hcd) { dev_err(&pdev->dev, "Failed to allocate HC buffer\n"); ret = -ENOMEM; - goto out8; + goto fail_hcd; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Failed to get MEM resource\n"); - ret = -ENOMEM; - goto out8; - } - hcd->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); - goto out8; + goto fail_resource; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); @@ -317,7 +310,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; - goto out8; + goto fail_resource; } nxp_start_hc(); @@ -331,23 +324,24 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) return ret; nxp_stop_hc(); -out8: +fail_resource: 
usb_put_hcd(hcd); -out7: +fail_hcd: clk_disable(usb_otg_clk); -out6: +fail_otgen: clk_put(usb_otg_clk); -out5: +fail_otg: clk_disable(usb_dev_clk); -out4: +fail_deven: clk_put(usb_dev_clk); -out3: +fail_dev: +fail_rate: clk_disable(usb_pll_clk); -out2: +fail_pllen: clk_put(usb_pll_clk); -out1: +fail_pll: +fail_disable: isp1301_i2c_client = NULL; -out: return ret; } diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c index ddfc31427bc0..8663851c8d8e 100644 --- a/drivers/usb/host/ohci-omap3.c +++ b/drivers/usb/host/ohci-omap3.c @@ -114,8 +114,6 @@ static const struct hc_driver ohci_omap3_hc_driver = { /*-------------------------------------------------------------------------*/ -static u64 omap_ohci_dma_mask = DMA_BIT_MASK(32); - /* * configure so an HC device and id are always provided * always called with process context; sleeping is OK @@ -168,8 +166,10 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev) * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. */ - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &omap_ohci_dma_mask; + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + if (!dev->coherent_dma_mask) + dev->coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, dev_name(dev)); diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index efe71f3ca477..279b2ef17411 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -282,8 +282,6 @@ static const struct of_device_id pxa_ohci_dt_ids[] = { MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids); -static u64 pxa_ohci_dma_mask = DMA_BIT_MASK(32); - static int ohci_pxa_of_init(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -298,7 +296,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pxa_ohci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c index 9020bf0e2eca..3e19e0170d11 100644 --- a/drivers/usb/host/ohci-spear.c +++ b/drivers/usb/host/ohci-spear.c @@ -91,8 +91,6 @@ static const struct hc_driver ohci_spear_hc_driver = { .start_port_reset = ohci_start_port_reset, }; -static u64 spear_ohci_dma_mask = DMA_BIT_MASK(32); - static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) { const struct hc_driver *driver = &ohci_spear_hc_driver; @@ -114,7 +112,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &spear_ohci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); usbh_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbh_clk)) { diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 4f0f0339532f..0f401dbfaf07 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -3084,7 +3084,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf) int ports, i, retval = 1; unsigned long flags; - /* if !USB_SUSPEND, root hub timers won't get shut down ... 
*/ + /* if !PM_RUNTIME, root hub timers won't get shut down ... */ if (!HC_IS_RUNNING(hcd->state)) return 0; diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index ad4483efb6d6..b2ec7fe758dd 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -22,7 +22,7 @@ * and usb-storage. * * TODO: - * - usb suspend/resume triggered by sl811 (with USB_SUSPEND) + * - usb suspend/resume triggered by sl811 (with PM_RUNTIME) * - various issues noted in the code * - performance work; use both register banks; ... * - use urb->iso_frame_desc[] with ISO transfers diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index f87bee6d2789..9189bc984c98 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -225,7 +225,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) /* auto-stop if nothing connected for 1 second */ if (any_ports_active(uhci)) uhci->rh_state = UHCI_RH_RUNNING; - else if (time_after_eq(jiffies, uhci->auto_stop_time)) + else if (time_after_eq(jiffies, uhci->auto_stop_time) && + !uhci->wait_for_hp) suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); break; diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index 8c4dace4b14a..f1db61ada6a8 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -60,8 +60,6 @@ static const struct hc_driver uhci_platform_hc_driver = { .hub_control = uhci_hub_control, }; -static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32); - static int uhci_hcd_platform_probe(struct platform_device *pdev) { struct usb_hcd *hcd; @@ -78,7 +76,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &platform_uhci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, pdev->name); diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index f0976d8190bc..041c6ddb695c 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, return -EINVAL; /* Can't change the period */ } else { - next = uhci->frame_number + 2; + next = uhci->frame_number + 1; /* Find the next unused frame */ if (list_empty(&qh->queue)) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 965b539bc474..fbf75e57628b 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1423,15 +1423,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); /* Set the max packet size and max burst */ + max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); + max_burst = 0; switch (udev->speed) { case USB_SPEED_SUPER: - max_packet = usb_endpoint_maxp(&ep->desc); - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); /* dig out max burst from ep companion desc */ - max_packet = ep->ss_ep_comp.bMaxBurst; - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); + max_burst = ep->ss_ep_comp.bMaxBurst; break; case USB_SPEED_HIGH: + /* Some devices get this wrong */ + if (usb_endpoint_xfer_bulk(&ep->desc)) + max_packet = 512; /* bits 11:12 specify the number of additional transaction * opportunities per microframe (USB 2.0, section 9.6.6) */ @@ -1439,17 +1441,16 @@ int 
xhci_endpoint_init(struct xhci_hcd *xhci, usb_endpoint_xfer_int(&ep->desc)) { max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); } - /* Fall through */ + break; case USB_SPEED_FULL: case USB_SPEED_LOW: - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); break; default: BUG(); } + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) | + MAX_BURST(max_burst)); max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); @@ -1826,6 +1827,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) } spin_unlock_irqrestore(&xhci->lock, flags); + if (!xhci->rh_bw) + goto no_bw; + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); for (i = 0; i < num_ports; i++) { struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; @@ -1844,6 +1848,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) } } +no_bw: xhci->num_usb2_ports = 0; xhci->num_usb3_ports = 0; xhci->num_active_eps = 0; @@ -2255,6 +2260,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) u32 page_size, temp; int i; + INIT_LIST_HEAD(&xhci->lpm_failed_devs); + INIT_LIST_HEAD(&xhci->cancel_cmd_list); + page_size = xhci_readl(xhci, &xhci->op_regs->page_size); xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); for (i = 0; i < 16; i++) { @@ -2333,7 +2341,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); if (!xhci->cmd_ring) goto fail; - INIT_LIST_HEAD(&xhci->cancel_cmd_list); xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); xhci_dbg(xhci, "First segment DMA is 0x%llx\n", (unsigned long long)xhci->cmd_ring->first_seg->dma); @@ -2444,8 +2451,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (xhci_setup_port_arrays(xhci, flags)) goto fail; - INIT_LIST_HEAD(&xhci->lpm_failed_devs); - /* Enable USB 3.0 device notifications for function remote wake, which * is necessary for allowing USB 3.0 devices to do remote wakeup from * U3 (device suspend). diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1a30c380043c..cc24e39b97d5 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -221,6 +221,14 @@ static void xhci_pci_remove(struct pci_dev *dev) static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + /* + * Systems with the TI redriver that loses port status change events + * need to have the registers polled during D3, so avoid D3cold. 
+ */ + if (xhci_compliance_mode_recovery_timer_quirk_check()) + pdev->no_d3cold = true; return xhci_suspend(xhci); } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b4aa79d154b2..d8f640b12dd9 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -466,7 +466,7 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) * Systems: * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 */ -static bool compliance_mode_recovery_timer_quirk_check(void) +bool xhci_compliance_mode_recovery_timer_quirk_check(void) { const char *dmi_product_name, *dmi_sys_vendor; @@ -517,7 +517,7 @@ int xhci_init(struct usb_hcd *hcd) xhci_dbg(xhci, "Finished xhci_init\n"); /* Initializing Compliance Mode Recovery Data If Needed */ - if (compliance_mode_recovery_timer_quirk_check()) { + if (xhci_compliance_mode_recovery_timer_quirk_check()) { xhci->quirks |= XHCI_COMP_MODE_QUIRK; compliance_mode_recovery_timer_init(xhci); } @@ -956,6 +956,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; + bool comp_timer_running = false; /* Wait a bit if either of the roothubs need to settle from the * transition into bus suspend. @@ -993,6 +994,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) /* If restore operation fails, re-initialize the HC during resume */ if ((temp & STS_SRE) || hibernated) { + + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + !(xhci_all_ports_seen_u0(xhci))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n"); + } + /* Let the USB core know _both_ roothubs lost power. */ usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); @@ -1035,6 +1043,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) retval = xhci_init(hcd->primary_hcd); if (retval) return retval; + comp_timer_running = true; + xhci_dbg(xhci, "Start the primary HCD\n"); retval = xhci_run(hcd->primary_hcd); if (!retval) { @@ -1076,7 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) * to suffer the Compliance Mode issue again. It doesn't matter if * ports have entered previously to U0 before system's suspension. */ - if (xhci->quirks & XHCI_COMP_MODE_QUIRK) + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) compliance_mode_recovery_timer_init(xhci); /* Re-enable port polling. 
*/ diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 29c978e37135..77600cefcaf1 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1853,4 +1853,7 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); +/* xHCI quirks */ +bool xhci_compliance_mode_recovery_timer_quirk_check(void); + #endif /* __LINUX_XHCI_HCD_H */ diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 3a18e44e9391..e1b661d04021 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -560,6 +560,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) if (!config) { dev_err(&pdev->dev, "failed to allocate musb hdrc config\n"); + ret = -ENOMEM; goto err2; } diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 8914dec49f01..9d3044bdebe5 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -1232,7 +1232,6 @@ void musb_host_tx(struct musb *musb, u8 epnum) void __iomem *mbase = musb->mregs; struct dma_channel *dma; bool transfer_pending = false; - static bool use_sg; musb_ep_select(mbase, epnum); tx_csr = musb_readw(epio, MUSB_TXCSR); @@ -1463,9 +1462,9 @@ done: * NULL. */ if (!urb->transfer_buffer) - use_sg = true; + qh->use_sg = true; - if (use_sg) { + if (qh->use_sg) { /* sg_miter_start is already done in musb_ep_program */ if (!sg_miter_next(&qh->sg_miter)) { dev_err(musb->controller, "error: sg list empty\n"); @@ -1484,9 +1483,9 @@ done: qh->segsize = length; - if (use_sg) { + if (qh->use_sg) { if (offset + length >= urb->transfer_buffer_length) - use_sg = false; + qh->use_sg = false; } musb_ep_select(mbase, epnum); @@ -1552,7 +1551,6 @@ void musb_host_rx(struct musb *musb, u8 epnum) bool done = false; u32 status; struct dma_channel *dma; - static bool use_sg; unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; musb_ep_select(mbase, epnum); @@ -1878,12 +1876,12 @@ void musb_host_rx(struct musb *musb, u8 epnum) * NULL. 
*/ if (!urb->transfer_buffer) { - use_sg = true; + qh->use_sg = true; sg_miter_start(&qh->sg_miter, urb->sg, 1, sg_flags); } - if (use_sg) { + if (qh->use_sg) { if (!sg_miter_next(&qh->sg_miter)) { dev_err(musb->controller, "error: sg list empty\n"); sg_miter_stop(&qh->sg_miter); @@ -1913,8 +1911,8 @@ finish: urb->actual_length += xfer_len; qh->offset += xfer_len; if (done) { - if (use_sg) - use_sg = false; + if (qh->use_sg) + qh->use_sg = false; if (urb->status == -EINPROGRESS) urb->status = status; diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h index 5a9c8feec10c..738f7eb60df9 100644 --- a/drivers/usb/musb/musb_host.h +++ b/drivers/usb/musb/musb_host.h @@ -74,6 +74,7 @@ struct musb_qh { u16 frame; /* for periodic schedule */ unsigned iso_idx; /* in urb->iso_frame_desc[] */ struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */ + bool use_sg; /* to track urb using sglist */ }; /* map from control or bulk queue head to the first qh on that ring */ diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 3551f1a30c65..628b93fe5ccc 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -549,7 +549,8 @@ static int omap2430_probe(struct platform_device *pdev) glue->control_otghs = omap_get_control_dev(); if (IS_ERR(glue->control_otghs)) { dev_vdbg(&pdev->dev, "Failed to get control device\n"); - return -ENODEV; + ret = PTR_ERR(glue->control_otghs); + goto err2; } } else { glue->control_otghs = ERR_PTR(-ENODEV); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 371d0e74e909..7ef3eb8617a6 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -25,7 +25,7 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" - depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND + depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME select USB_OTG help Enable this to support Freescale USB OTG transceiver. @@ -139,7 +139,6 @@ config USB_ISP1301 tristate "NXP ISP1301 USB transceiver support" depends on USB || USB_GADGET depends on I2C - select USB_OTG_UTILS help Say Y here to add support for the NXP ISP1301 USB transceiver driver. 
This chip is typically used as USB transceiver for USB host, gadget @@ -162,7 +161,7 @@ config USB_MSM_OTG config USB_MV_OTG tristate "Marvell USB OTG support" - depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND + depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME select USB_OTG help Say Y here if you want to build Marvell USB OTG transciever diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c index 4acef26a2ef5..e5eb1b5a04eb 100644 --- a/drivers/usb/phy/phy-ab8500-usb.c +++ b/drivers/usb/phy/phy-ab8500-usb.c @@ -892,8 +892,6 @@ static int ab8500_usb_remove(struct platform_device *pdev) else if (ab->mode == USB_PERIPHERAL) ab8500_usb_peri_phy_dis(ab); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c index 97b9308507c3..e771bafb9f1d 100644 --- a/drivers/usb/phy/phy-fsl-usb.c +++ b/drivers/usb/phy/phy-fsl-usb.c @@ -799,6 +799,7 @@ static int fsl_otg_conf(struct platform_device *pdev) /* initialize the otg structure */ fsl_otg_tc->phy.label = DRIVER_DESC; + fsl_otg_tc->phy.dev = &pdev->dev; fsl_otg_tc->phy.set_power = fsl_otg_set_power; fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy; diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c index 4c76074e518d..8443335c2ea0 100644 --- a/drivers/usb/phy/phy-gpio-vbus-usb.c +++ b/drivers/usb/phy/phy-gpio-vbus-usb.c @@ -266,6 +266,7 @@ static int __init gpio_vbus_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gpio_vbus); gpio_vbus->dev = &pdev->dev; gpio_vbus->phy.label = "gpio-vbus"; + gpio_vbus->phy.dev = gpio_vbus->dev; gpio_vbus->phy.set_power = gpio_vbus_set_power; gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend; gpio_vbus->phy.state = OTG_STATE_UNDEFINED; @@ -343,7 +344,6 @@ err_irq: gpio_free(pdata->gpio_pullup); gpio_free(pdata->gpio_vbus); err_gpio: - platform_set_drvdata(pdev, NULL); kfree(gpio_vbus->phy.otg); kfree(gpio_vbus); return err; @@ -365,7 +365,6 @@ static int __exit gpio_vbus_remove(struct platform_device *pdev) if (gpio_is_valid(pdata->gpio_pullup)) gpio_free(pdata->gpio_pullup); gpio_free(gpio); - platform_set_drvdata(pdev, NULL); kfree(gpio_vbus->phy.otg); kfree(gpio_vbus); diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index 225ae6c97eeb..8a55b37d1a02 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c @@ -102,6 +102,7 @@ static int isp1301_probe(struct i2c_client *client, mutex_init(&isp->mutex); phy = &isp->phy; + phy->dev = &client->dev; phy->label = DRV_NAME; phy->init = isp1301_phy_init; phy->set_vbus = isp1301_phy_set_vbus; diff --git a/drivers/usb/phy/phy-mv-u3d-usb.c b/drivers/usb/phy/phy-mv-u3d-usb.c index f7838a43347c..1568ea63e338 100644 --- a/drivers/usb/phy/phy-mv-u3d-usb.c +++ b/drivers/usb/phy/phy-mv-u3d-usb.c @@ -278,11 +278,6 @@ static int mv_u3d_phy_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing mem resource\n"); - return -ENODEV; - } - phy_base = devm_ioremap_resource(dev, res); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c index c987bbe27851..4a6b03c73876 100644 --- a/drivers/usb/phy/phy-mv-usb.c +++ b/drivers/usb/phy/phy-mv-usb.c @@ -667,7 +667,6 @@ int mv_otg_remove(struct platform_device *pdev) mv_otg_disable(mvotg); usb_remove_phy(&mvotg->phy); - platform_set_drvdata(pdev, NULL); return 0; } @@ -850,8 +849,6 @@ err_destroy_workqueue: 
flush_workqueue(mvotg->qwork); destroy_workqueue(mvotg->qwork); - platform_set_drvdata(pdev, NULL); - return retval; } diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 9d4381e64d51..bd601c537c8d 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -130,11 +130,6 @@ static int mxs_phy_probe(struct platform_device *pdev) int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "can't get device resources\n"); - return -ENOENT; - } - base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -160,6 +155,7 @@ static int mxs_phy_probe(struct platform_device *pdev) mxs_phy->phy.set_suspend = mxs_phy_suspend; mxs_phy->phy.notify_connect = mxs_phy_on_connect; mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect; + mxs_phy->phy.type = USB_PHY_TYPE_USB2; ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier); @@ -180,8 +176,6 @@ static int mxs_phy_remove(struct platform_device *pdev) usb_remove_phy(&mxs_phy->phy); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/usb/phy/phy-nop.c b/drivers/usb/phy/phy-nop.c index 2b10cc969bbb..638cc5dade35 100644 --- a/drivers/usb/phy/phy-nop.c +++ b/drivers/usb/phy/phy-nop.c @@ -254,8 +254,6 @@ static int nop_usb_xceiv_remove(struct platform_device *pdev) usb_remove_phy(&nop->phy); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c index 45ffe036dacc..9d5e273abcc7 100644 --- a/drivers/usb/phy/phy-samsung-usb2.c +++ b/drivers/usb/phy/phy-samsung-usb2.c @@ -363,11 +363,6 @@ static int samsung_usb2phy_probe(struct platform_device *pdev) int ret; phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!phy_mem) { - dev_err(dev, "%s: missing mem resource\n", __func__); - return -ENODEV; - } - phy_base = devm_ioremap_resource(dev, phy_mem); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/drivers/usb/phy/phy-samsung-usb3.c b/drivers/usb/phy/phy-samsung-usb3.c index 133f3d0c554f..5a9efcbcb532 100644 --- a/drivers/usb/phy/phy-samsung-usb3.c +++ b/drivers/usb/phy/phy-samsung-usb3.c @@ -239,11 +239,6 @@ static int samsung_usb3phy_probe(struct platform_device *pdev) int ret; phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!phy_mem) { - dev_err(dev, "%s: missing mem resource\n", __func__); - return -ENODEV; - } - phy_base = devm_ioremap_resource(dev, phy_mem); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 3b16118cbf62..40e7fd94646f 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -43,7 +43,7 @@ #define DRIVER_NAME "ark3116" /* usb timeout of 1 second */ -#define ARK_TIMEOUT (1*HZ) +#define ARK_TIMEOUT 1000 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x6547, 0x0232) }, diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index d341555d37d8..082120198f87 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -65,6 +65,7 @@ static const struct usb_device_id id_table_earthmate[] = { static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, + { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ }; @@ -78,6 +79,7 @@ static const struct usb_device_id id_table_combined[] = { { 
USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, + { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; @@ -229,6 +231,12 @@ static struct usb_serial_driver * const serial_drivers[] = { * Cypress serial helper functions *****************************************************************************/ +/* FRWD Dongle hidcom needs to skip reset and speed checks */ +static inline bool is_frwd(struct usb_device *dev) +{ + return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) && + (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD)); +} static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) { @@ -238,6 +246,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) if (unstable_bauds) return new_rate; + /* FRWD Dongle uses 115200 bps */ + if (is_frwd(port->serial->dev)) + return new_rate; + /* * The general purpose firmware for the Cypress M8 allows for * a maximum speed of 57600bps (I have no idea whether DeLorme @@ -448,7 +460,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port) return -ENOMEM; } - usb_reset_configuration(serial->dev); + /* Skip reset for FRWD device. It is a workaround: + device hangs if it receives SET_CONFIGURATION in Configured + state. */ + if (!is_frwd(serial->dev)) + usb_reset_configuration(serial->dev); priv->cmd_ctrl = 0; priv->line_control = 0; diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index 67cf60826884..b461311a2ae7 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -24,6 +24,10 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 +/* FRWD Dongle - a GPS sports watch */ +#define VENDOR_ID_FRWD 0x6737 +#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 + /* Powercom UPS, chip CY7C63723 */ #define VENDOR_ID_POWERCOM 0x0d9f #define PRODUCT_ID_UPS 0x0002 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 242b5776648a..7260ec660347 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -189,6 +189,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, @@ -924,8 +926,8 @@ static int ftdi_tiocmset(struct tty_struct *tty, static int ftdi_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static void ftdi_break_ctl(struct tty_struct *tty, int break_state); -static int ftdi_chars_in_buffer(struct tty_struct *tty); -static int ftdi_get_modem_status(struct tty_struct *tty, +static bool ftdi_tx_empty(struct usb_serial_port *port); +static int ftdi_get_modem_status(struct usb_serial_port *port, unsigned char status[2]); static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base); @@ -961,7 +963,7 @@ static struct usb_serial_driver ftdi_sio_device = { .ioctl = ftdi_ioctl, .set_termios = ftdi_set_termios, .break_ctl = ftdi_break_ctl, - .chars_in_buffer = ftdi_chars_in_buffer, +
.tx_empty = ftdi_tx_empty, }; static struct usb_serial_driver * const serial_drivers[] = { @@ -2056,27 +2058,18 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state) } -static int ftdi_chars_in_buffer(struct tty_struct *tty) +static bool ftdi_tx_empty(struct usb_serial_port *port) { - struct usb_serial_port *port = tty->driver_data; - int chars; unsigned char buf[2]; int ret; - chars = usb_serial_generic_chars_in_buffer(tty); - if (chars) - goto out; - - /* Check if hardware buffer is empty. */ - ret = ftdi_get_modem_status(tty, buf); + ret = ftdi_get_modem_status(port, buf); if (ret == 2) { if (!(buf[1] & FTDI_RS_TEMT)) - chars = 1; + return false; } -out: - dev_dbg(&port->dev, "%s - %d\n", __func__, chars); - return chars; + return true; } /* old_termios contains the original termios settings and tty->termios contains @@ -2268,10 +2261,9 @@ no_c_cflag_changes: * Returns the number of status bytes retrieved (device dependant), or * negative error code. */ -static int ftdi_get_modem_status(struct tty_struct *tty, +static int ftdi_get_modem_status(struct usb_serial_port *port, unsigned char status[2]) { - struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned char *buf; int len; @@ -2336,7 +2328,7 @@ static int ftdi_tiocmget(struct tty_struct *tty) unsigned char buf[2]; int ret; - ret = ftdi_get_modem_status(tty, buf); + ret = ftdi_get_modem_status(port, buf); if (ret < 0) return ret; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 98528270c43c..6dd79253205d 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -772,6 +772,8 @@ */ #define NEWPORT_VID 0x104D #define NEWPORT_AGILIS_PID 0x3000 +#define NEWPORT_CONEX_CC_PID 0x3002 +#define NEWPORT_CONEX_AGP_PID 0x3006 /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 297665fdd16d..ba45170c78e5 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -253,6 +253,37 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty) } EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer); +void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) +{ + struct usb_serial_port *port = tty->driver_data; + unsigned int bps; + unsigned long period; + unsigned long expire; + + bps = tty_get_baud_rate(tty); + if (!bps) + bps = 9600; /* B0 */ + /* + * Use a poll-period of roughly the time it takes to send one + * character or at least one jiffy. 
+ */ + period = max_t(unsigned long, (10 * HZ / bps), 1); + period = min_t(unsigned long, period, timeout); + + dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", + __func__, jiffies_to_msecs(timeout), + jiffies_to_msecs(period)); + expire = jiffies + timeout; + while (!port->serial->type->tx_empty(port)) { + schedule_timeout_interruptible(period); + if (signal_pending(current)) + break; + if (time_after(jiffies, expire)) + break; + } +} +EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent); + static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port, int index, gfp_t mem_flags) { diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 158bf4bc29cc..1be6ba7bee27 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -2019,8 +2019,6 @@ static int edge_chars_in_buffer(struct tty_struct *tty) struct edgeport_port *edge_port = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; - int ret; - if (edge_port == NULL) return 0; @@ -2028,16 +2026,22 @@ static int edge_chars_in_buffer(struct tty_struct *tty) chars = kfifo_len(&edge_port->write_fifo); spin_unlock_irqrestore(&edge_port->ep_lock, flags); - if (!chars) { - ret = tx_active(edge_port); - if (ret > 0) - chars = ret; - } - dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); return chars; } +static bool edge_tx_empty(struct usb_serial_port *port) +{ + struct edgeport_port *edge_port = usb_get_serial_port_data(port); + int ret; + + ret = tx_active(edge_port); + if (ret > 0) + return false; + + return true; +} + static void edge_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; @@ -2557,6 +2561,7 @@ static struct usb_serial_driver edgeport_1port_device = { .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, + .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, @@ -2589,6 +2594,7 @@ static struct usb_serial_driver edgeport_2port_device = { .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, + .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index 9d74c278b7b5..790673e5faa7 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -287,7 +287,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count) usb_bulk_msg(serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), buf, - count, &actual, HZ * 1); + count, &actual, 1000); if (status != IUU_OPERATION_OK) dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status); @@ -307,7 +307,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count) usb_bulk_msg(serial->dev, usb_rcvbulkpipe(serial->dev, port->bulk_in_endpointAddress), buf, - count, &actual, HZ * 1); + count, &actual, 1000); if (status != IUU_OPERATION_OK) dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status); diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index eb30d7b01f36..3549d073df22 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1548,7 +1548,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial, struct keyspan_serial_private *s_priv; struct keyspan_port_private *p_priv; const struct keyspan_device_details 
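usb_serial_generic_wait_until_sent() polls the driver's tx_empty() callback, and the interesting detail is the poll period: roughly one character time (ten bit times) at the current baud rate, clamped to at least one jiffy and at most the caller's timeout. A small self-contained sketch of that heuristic, with an illustrative helper name but the same arithmetic:

#include <linux/kernel.h>
#include <linux/jiffies.h>

/* Poll interval, in jiffies, for waiting on a draining transmitter. */
static unsigned long tx_poll_period(unsigned int bps, unsigned long timeout)
{
        unsigned long period;

        if (!bps)
                bps = 9600;                             /* B0: pick a sane default */
        period = max_t(unsigned long, 10UL * HZ / bps, 1);      /* ~1 character */
        return min_t(unsigned long, period, timeout);           /* never outlive timeout */
}

With HZ=250 and 115200 baud the character time rounds down to zero jiffies, so the lower clamp is what keeps the polling loop from busy-spinning.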
*d_details; - int outcont_urb; struct urb *this_urb; int device_port, err; @@ -1559,7 +1558,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial, d_details = s_priv->device_details; device_port = port->number - port->serial->minor; - outcont_urb = d_details->outcont_endpoints[port->number]; this_urb = p_priv->outcont_urb; dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe)); @@ -1685,14 +1683,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial, err = usb_submit_urb(this_urb, GFP_ATOMIC); if (err != 0) dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err); -#if 0 - else { - dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__ - outcont_urb, this_urb->transfer_buffer_length, - usb_pipeendpoint(this_urb->pipe)); - } -#endif - return 0; } diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index cc0e54345df9..f27c621a9297 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -40,7 +40,7 @@ #define DRIVER_DESC "Moschip USB Serial Driver" /* default urb timeout */ -#define MOS_WDR_TIMEOUT (HZ * 5) +#define MOS_WDR_TIMEOUT 5000 #define MOS_MAX_PORT 0x02 #define MOS_WRITE 0x0E @@ -227,11 +227,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum, __u8 requesttype = (__u8)0xc0; __u16 index = get_reg_index(reg); __u16 value = get_reg_value(reg, serial_portnum); - int status = usb_control_msg(usbdev, pipe, request, requesttype, value, - index, data, 1, MOS_WDR_TIMEOUT); - if (status < 0) + u8 *buf; + int status; + + buf = kmalloc(1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + status = usb_control_msg(usbdev, pipe, request, requesttype, value, + index, buf, 1, MOS_WDR_TIMEOUT); + if (status == 1) + *data = *buf; + else if (status < 0) dev_err(&usbdev->dev, "mos7720: usb_control_msg() failed: %d", status); + kfree(buf); + return status; } @@ -1618,7 +1629,7 @@ static void change_port_settings(struct tty_struct *tty, mos7720_port->shadowMCR |= (UART_MCR_XONANY); /* To set hardware flow control to the specified * * serial port, in SP1/2_CONTROL_REG */ - if (port->number) + if (port_number) write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); else write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); @@ -1927,7 +1938,7 @@ static int mos7720_startup(struct usb_serial *serial) /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ); + (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); /* start the interrupt urb */ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); @@ -1970,7 +1981,7 @@ static void mos7720_release(struct usb_serial *serial) /* wait for synchronous usb calls to return */ if (mos_parport->msg_pending) wait_for_completion_timeout(&mos_parport->syncmsg_compl, - MOS_WDR_TIMEOUT); + msecs_to_jiffies(MOS_WDR_TIMEOUT)); parport_remove_port(mos_parport->pp); usb_set_serial_data(serial, NULL); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index a0d5ea545982..7e998081e1cd 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -2142,13 +2142,21 @@ static int mos7840_ioctl(struct tty_struct *tty, static int mos7810_check(struct usb_serial *serial) { int i, pass_count = 0; + u8 *buf; __u16 data = 0, mcr_data = 0; __u16 test_pattern = 0x55AA; + int res; + + buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); + if (!buf) + return 0; /* failed to identify 
7810 */ /* Store MCR setting */ - usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER, - &mcr_data, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + if (res == VENDOR_READ_LENGTH) + mcr_data = *buf; for (i = 0; i < 16; i++) { /* Send the 1-bit test pattern out to MCS7810 test pin */ @@ -2158,9 +2166,12 @@ static int mos7810_check(struct usb_serial *serial) MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); /* Read the test pattern back */ - usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), - MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data, - VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + res = usb_control_msg(serial->dev, + usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, + MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, + VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + if (res == VENDOR_READ_LENGTH) + data = *buf; /* If this is a MCS7810 device, both test patterns must match */ if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001) @@ -2174,6 +2185,8 @@ static int mos7810_check(struct usb_serial *serial) MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); + kfree(buf); + if (pass_count == 16) return 1; @@ -2183,11 +2196,17 @@ static int mos7810_check(struct usb_serial *serial) static int mos7840_calc_num_ports(struct usb_serial *serial) { __u16 data = 0x00; + u8 *buf; int mos7840_num_ports; - usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), - MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data, - VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); + if (buf) { + usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, + VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + data = *buf; + kfree(buf); + } if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 || serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 734372846abb..bd4323ddae1a 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ +#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -249,13 +250,7 @@ static void option_instat_callback(struct urb *urb); #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 -#define ZTE_PRODUCT_CDMA_TECH 0xfffe -#define ZTE_PRODUCT_AC8710 0xfff1 -#define ZTE_PRODUCT_AC2726 0xfff5 -#define ZTE_PRODUCT_AC8710T 0xffff #define ZTE_PRODUCT_MC2718 0xffe8 -#define ZTE_PRODUCT_AD3812 0xffeb -#define ZTE_PRODUCT_MC2716 0xffed #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -341,8 +336,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 -#define CINTERION_PRODUCT_AH6 0x0055 -#define CINTERION_PRODUCT_PLS8 0x0060 +#define CINTERION_PRODUCT_AHXX 0x0055 +#define CINTERION_PRODUCT_PLXX 0x0060 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -494,18 +489,10 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = { .reserved = BIT(4), }; 
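The mos7720 and mos7840 hunks share one theme: buffers handed to usb_control_msg() may be used for DMA, so they must come from the heap rather than the caller's stack or a plain local variable. The reads are therefore bounced through a kmalloc()'d byte and copied out only when the transfer returned the expected length. A hedged sketch of that pattern, with made-up request constants:

#include <linux/slab.h>
#include <linux/usb.h>

/* Illustrative request constants, not taken from the patch. */
#define EX_RDREQ        0x0d
#define EX_RD_RTYPE     (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)

static int example_read_reg(struct usb_device *udev, u16 reg, u8 *val)
{
        u8 *buf;
        int ret;

        buf = kmalloc(1, GFP_KERNEL);   /* DMA-able, unlike a stack byte */
        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), EX_RDREQ,
                              EX_RD_RTYPE, 0, reg, buf, 1, 5000);
        if (ret == 1)
                *val = *buf;            /* copy out only on a full read */
        kfree(buf);

        return ret < 0 ? ret : 0;
}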
-static const struct option_blacklist_info zte_ad3812_z_blacklist = { - .sendsetup = BIT(0) | BIT(1) | BIT(2), -}; - static const struct option_blacklist_info zte_mc2718_z_blacklist = { .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), }; -static const struct option_blacklist_info zte_mc2716_z_blacklist = { - .sendsetup = BIT(1) | BIT(2) | BIT(3), -}; - static const struct option_blacklist_info huawei_cdc12_blacklist = { .reserved = BIT(1) | BIT(2), }; @@ -592,6 +579,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */ + .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, @@ -771,6 +760,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -795,7 +785,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ @@ -966,6 +955,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), @@ -1195,16 +1186,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, - { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, + /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, @@ -1264,8 +1248,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 59b32b782126..bd794b43898c 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ + {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */ /* non Gobi Qualcomm serial devices */ {USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)}, /* Sierra Wireless MC7700 Device Management */ diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index cac47aef2918..c92c5ed4e580 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -101,6 +101,7 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count); static int ti_write_room(struct tty_struct *tty); static int ti_chars_in_buffer(struct tty_struct *tty); +static bool ti_tx_empty(struct usb_serial_port *port); static void ti_throttle(struct tty_struct *tty); static void ti_unthrottle(struct tty_struct *tty); static int ti_ioctl(struct tty_struct *tty, @@ -222,6 +223,7 @@ static struct usb_serial_driver ti_1port_device = { .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, + .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, @@ -253,6 +255,7 @@ static struct usb_serial_driver ti_2port_device = { .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, + .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, @@ -684,8 +687,6 @@ static int ti_chars_in_buffer(struct tty_struct *tty) struct ti_port *tport = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; - int ret; - u8 lsr; if 
(tport == NULL) return 0; @@ -694,16 +695,22 @@ static int ti_chars_in_buffer(struct tty_struct *tty) chars = kfifo_len(&tport->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); - if (!chars) { - ret = ti_get_lsr(tport, &lsr); - if (!ret && !(lsr & TI_LSR_TX_EMPTY)) - chars = 1; - } - dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); return chars; } +static bool ti_tx_empty(struct usb_serial_port *port) +{ + struct ti_port *tport = usb_get_serial_port_data(port); + int ret; + u8 lsr; + + ret = ti_get_lsr(tport, &lsr); + if (!ret && !(lsr & TI_LSR_TX_EMPTY)) + return false; + + return true; +} static void ti_throttle(struct tty_struct *tty) { diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index cf75beb1251b..5f6b1ff9d29e 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -359,20 +359,29 @@ static int serial_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; - int count = 0; dev_dbg(tty->dev, "%s\n", __func__); - mutex_lock(&serial->disc_mutex); - /* if the device was unplugged then any remaining characters - fell out of the connector ;) */ if (serial->disconnected) - count = 0; - else - count = serial->type->chars_in_buffer(tty); - mutex_unlock(&serial->disc_mutex); + return 0; - return count; + return serial->type->chars_in_buffer(tty); +} + +static void serial_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct usb_serial_port *port = tty->driver_data; + struct usb_serial *serial = port->serial; + + dev_dbg(tty->dev, "%s\n", __func__); + + if (!port->serial->type->wait_until_sent) + return; + + mutex_lock(&serial->disc_mutex); + if (!serial->disconnected) + port->serial->type->wait_until_sent(tty, timeout); + mutex_unlock(&serial->disc_mutex); } static void serial_throttle(struct tty_struct *tty) @@ -399,7 +408,7 @@ static int serial_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; - int retval = -ENODEV; + int retval = -ENOIOCTLCMD; dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd); @@ -411,8 +420,6 @@ static int serial_ioctl(struct tty_struct *tty, default: if (port->serial->type->ioctl) retval = port->serial->type->ioctl(tty, cmd, arg); - else - retval = -ENOIOCTLCMD; } return retval; @@ -1191,6 +1198,7 @@ static const struct tty_operations serial_ops = { .unthrottle = serial_unthrottle, .break_ctl = serial_break, .chars_in_buffer = serial_chars_in_buffer, + .wait_until_sent = serial_wait_until_sent, .tiocmget = serial_tiocmget, .tiocmset = serial_tiocmset, .get_icount = serial_get_icount, @@ -1316,6 +1324,8 @@ static void usb_serial_operations_init(struct usb_serial_driver *device) set_to_generic_if_null(device, close); set_to_generic_if_null(device, write_room); set_to_generic_if_null(device, chars_in_buffer); + if (device->tx_empty) + set_to_generic_if_null(device, wait_until_sent); set_to_generic_if_null(device, read_bulk_callback); set_to_generic_if_null(device, write_bulk_callback); set_to_generic_if_null(device, process_read_urb); diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 7573ec8a084f..9910aa2edf4b 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -560,10 +560,19 @@ static int treo_attach(struct usb_serial *serial) */ #define COPY_PORT(dest, src) \ do { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \ + dest->read_urbs[i] = src->read_urbs[i]; \ + 
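ftdi_sio, io_ti and ti_usb_3410_5052 all move their "has the hardware FIFO drained?" logic out of chars_in_buffer() and into the new tx_empty() callback, and the usb-serial core wires up the generic wait_until_sent() only for drivers that provide it (see the conditional set_to_generic_if_null above). From a driver's point of view the opt-in looks roughly like the sketch below; the tx_empty field comes from the usb_serial_driver change made elsewhere in this series, everything else is illustrative:

#include <linux/module.h>
#include <linux/usb/serial.h>

/* Report whether the device's transmit FIFO has drained. */
static bool example_tx_empty(struct usb_serial_port *port)
{
        /* A real driver would query a line-status register here;
         * assume drained so close() cannot hang on an error. */
        return true;
}

static struct usb_serial_driver example_device = {
        .driver          = { .owner = THIS_MODULE, .name = "example" },
        /* chars_in_buffer now reports software buffers only ... */
        .chars_in_buffer = usb_serial_generic_chars_in_buffer,
        /* ... and tx_empty lets the core supply a wait_until_sent()
         * that polls the hardware until it really is idle. */
        .tx_empty        = example_tx_empty,
};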
dest->read_urbs[i]->context = dest; \ + dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \ + } \ dest->read_urb = src->read_urb; \ dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\ dest->bulk_in_buffer = src->bulk_in_buffer; \ + dest->bulk_in_size = src->bulk_in_size; \ dest->interrupt_in_urb = src->interrupt_in_urb; \ + dest->interrupt_in_urb->context = dest; \ dest->interrupt_in_endpointAddress = \ src->interrupt_in_endpointAddress;\ dest->interrupt_in_buffer = src->interrupt_in_buffer; \ diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index b9fca3586d74..347caad47a12 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -649,7 +649,7 @@ static void firm_setup_port(struct tty_struct *tty) struct whiteheat_port_settings port_settings; unsigned int cflag = tty->termios.c_cflag; - port_settings.port = port->number + 1; + port_settings.port = port->number - port->serial->minor + 1; /* get the byte size */ switch (cflag & CSIZE) { diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c index 39ee7373b4ee..fca4c752a4ed 100644 --- a/drivers/usb/serial/zte_ev.c +++ b/drivers/usb/serial/zte_ev.c @@ -41,9 +41,6 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, int len; unsigned char *buf; - if (port->number != 0) - return -ENODEV; - buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -53,7 +50,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0001, 0x0000, NULL, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 2st cmd and recieve data */ @@ -65,7 +62,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 3 cmd */ @@ -84,7 +81,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 4 cmd */ @@ -95,7 +92,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 5 cmd */ @@ -107,7 +104,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 6 cmd */ @@ -126,7 +123,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); kfree(buf); @@ -166,9 +163,6 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) int len; unsigned char *buf; - if (port->number != 0) - return; - buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); if (!buf) return; @@ -178,7 +172,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0002, 0x0000, NULL, 
len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 2st ctl cmd(CTL 21 22 03 00 00 00 00 00 ) */ @@ -186,7 +180,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 3st cmd and recieve data */ @@ -198,7 +192,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 4 cmd */ @@ -217,7 +211,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 5 cmd */ @@ -228,7 +222,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 6 cmd */ @@ -240,7 +234,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 7 cmd */ @@ -259,7 +253,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 8 cmd */ @@ -270,7 +264,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - HZ * USB_CTRL_GET_TIMEOUT); + USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); kfree(buf); @@ -279,11 +273,29 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) } static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x19d2, 0xffff) }, /* AC8700 */ - { USB_DEVICE(0x19d2, 0xfffe) }, - { USB_DEVICE(0x19d2, 0xfffd) }, /* MG880 */ + /* AC8710, AC8710T */ + { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) }, + /* AC8700 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) }, + /* MG880 */ + { USB_DEVICE(0x19d2, 0xfffd) }, + { USB_DEVICE(0x19d2, 0xfffc) }, + { USB_DEVICE(0x19d2, 0xfffb) }, + /* AC2726, AC8710_V3 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) }, + { USB_DEVICE(0x19d2, 0xfff6) }, + { USB_DEVICE(0x19d2, 0xfff7) }, + { USB_DEVICE(0x19d2, 0xfff8) }, + { USB_DEVICE(0x19d2, 0xfff9) }, + { USB_DEVICE(0x19d2, 0xffee) }, + /* AC2716, MC2716 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) }, + /* AD3812 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) }, + { USB_DEVICE(0x19d2, 0xffec) }, { USB_DEVICE(0x05C6, 0x3197) }, { USB_DEVICE(0x05C6, 0x6000) }, + { USB_DEVICE(0x05C6, 0x9008) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 8623577bbbe7..281be56d5648 100644 --- 
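zte_ev, iuu_phoenix and mos7720 fix the same unit bug in this series: the timeout argument of usb_control_msg()/usb_bulk_msg() is in milliseconds, so multiplying by HZ inflated every timeout by a factor of HZ. Conversion to jiffies is still required for scheduler-based waits, which is why mos7720_release() now wraps MOS_WDR_TIMEOUT in msecs_to_jiffies(). A compact sketch of the two cases, with an illustrative endpoint number and timeout value:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/usb.h>

#define EX_TIMEOUT_MS   5000    /* illustrative */

static void example_timeouts(struct usb_device *udev, void *buf, int len,
                             struct completion *done)
{
        int actual;

        /* USB message helpers take plain milliseconds. */
        usb_bulk_msg(udev, usb_sndbulkpipe(udev, 1), buf, len, &actual,
                     EX_TIMEOUT_MS);

        /* Scheduler helpers take jiffies, so convert explicitly. */
        wait_for_completion_timeout(done, msecs_to_jiffies(EX_TIMEOUT_MS));
}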
a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -105,8 +105,9 @@ struct rts51x_chip { int status_len; u32 flag; -#ifdef CONFIG_REALTEK_AUTOPM struct us_data *us; + +#ifdef CONFIG_REALTEK_AUTOPM struct timer_list rts51x_suspend_timer; unsigned long timer_expires; int pwr_state; @@ -988,6 +989,7 @@ static int init_realtek_cr(struct us_data *us) us->extra = chip; us->extra_destructor = realtek_cr_destructor; us->max_lun = chip->max_lun = rts51x_get_max_lun(us); + chip->us = us; usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun); @@ -1010,10 +1012,8 @@ static int init_realtek_cr(struct us_data *us) SET_AUTO_DELINK(chip); } #ifdef CONFIG_REALTEK_AUTOPM - if (ss_en) { - chip->us = us; + if (ss_en) realtek_cr_autosuspend_setup(us); - } #endif usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag); diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index acb7121a9316..6d78736563de 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1360,7 +1360,7 @@ static const struct file_operations vfio_device_fops = { */ static char *vfio_devnode(struct device *dev, umode_t *mode) { - if (MINOR(dev->devt) == 0) + if (mode && (MINOR(dev->devt) == 0)) *mode = S_IRUGO | S_IWUGO; return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2b51e2336aa2..f80d3dd41d8c 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) static void vhost_net_clear_ubuf_info(struct vhost_net *n) { - - bool zcopy; int i; - for (i = 0; i < n->dev.nvqs; ++i) { - zcopy = vhost_net_zcopy_mask & (0x1 << i); - if (zcopy) - kfree(n->vqs[i].ubuf_info); + for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { + kfree(n->vqs[i].ubuf_info); + n->vqs[i].ubuf_info = NULL; } } @@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n) bool zcopy; int i; - for (i = 0; i < n->dev.nvqs; ++i) { + for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { zcopy = vhost_net_zcopy_mask & (0x1 << i); if (!zcopy) continue; @@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n) return 0; err: - while (i--) { - zcopy = vhost_net_zcopy_mask & (0x1 << i); - if (!zcopy) - continue; - kfree(n->vqs[i].ubuf_info); - } + vhost_net_clear_ubuf_info(n); return -ENOMEM; } @@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n) { int i; + vhost_net_clear_ubuf_info(n); + for (i = 0; i < VHOST_NET_VQ_MAX; i++) { n->vqs[i].done_idx = 0; n->vqs[i].upend_idx = 0; n->vqs[i].ubufs = NULL; - kfree(n->vqs[i].ubuf_info); - n->vqs[i].ubuf_info = NULL; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; } @@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net) kref_get(&ubufs->kref); } nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; - } + } else + msg.msg_control = NULL; /* TODO: Check specific error and bomb out unless ENOBUFS? 
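The vhost-net cleanup makes vhost_net_clear_ubuf_info() free every queue's ubuf_info and reset the pointer, so the same helper can back both the allocation error path and vhost_net_vq_reset() without risking a double free. A generic sketch of that free-and-NULL idiom, with stand-in names and sizes:

#include <linux/errno.h>
#include <linux/slab.h>

#define EX_NR_QUEUES    2       /* stands in for VHOST_NET_VQ_MAX */

struct ex_queue {
        void *ubuf_info;
};

/* Safe to call at any point: kfree(NULL) is a no-op and the pointers
 * are reset, so repeated calls do nothing. */
static void ex_clear_ubuf_info(struct ex_queue *qs)
{
        int i;

        for (i = 0; i < EX_NR_QUEUES; i++) {
                kfree(qs[i].ubuf_info);
                qs[i].ubuf_info = NULL;
        }
}

static int ex_set_ubuf_info(struct ex_queue *qs)
{
        int i;

        for (i = 0; i < EX_NR_QUEUES; i++) {
                qs[i].ubuf_info = kmalloc(64, GFP_KERNEL);
                if (!qs[i].ubuf_info)
                        goto err;
        }
        return 0;
err:
        ex_clear_ubuf_info(qs);         /* unwinds whatever was allocated */
        return -ENOMEM;
}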
*/ err = sock->ops->sendmsg(NULL, sock, &msg, len); if (unlikely(err < 0)) { @@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n) int r; mutex_lock(&n->dev.mutex); + if (vhost_dev_has_owner(&n->dev)) { + r = -EBUSY; + goto out; + } r = vhost_net_set_ubuf_info(n); if (r) goto out; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index beee7f5787e6..60aa5ad09a2f 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -344,13 +344,19 @@ static int vhost_attach_cgroups(struct vhost_dev *dev) } /* Caller should have device mutex */ +bool vhost_dev_has_owner(struct vhost_dev *dev) +{ + return dev->mm; +} + +/* Caller should have device mutex */ long vhost_dev_set_owner(struct vhost_dev *dev) { struct task_struct *worker; int err; /* Is there an owner already? */ - if (dev->mm) { + if (vhost_dev_has_owner(dev)) { err = -EBUSY; goto err_mm; } diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index a7ad63592987..64adcf99ff33 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -133,6 +133,7 @@ struct vhost_dev { long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); long vhost_dev_set_owner(struct vhost_dev *dev); +bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); struct vhost_memory *vhost_dev_reset_owner_prepare(void); void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *); diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index bff0775e258c..5174ebac288d 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -3,6 +3,7 @@ * * Since these may be in userspace, we use (inline) accessors. */ +#include <linux/module.h> #include <linux/vringh.h> #include <linux/virtio_ring.h> #include <linux/kernel.h> @@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh) return __vringh_need_notify(vrh, getu16_kern); } EXPORT_SYMBOL(vringh_need_notify_kern); + +MODULE_LICENSE("GPL"); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index d71d60f94fc1..2e937bdace6f 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -2199,7 +2199,7 @@ config FB_XILINX config FB_GOLDFISH tristate "Goldfish Framebuffer" - depends on FB + depends on FB && HAS_DMA select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -2453,6 +2453,23 @@ config FB_HYPERV help This framebuffer driver supports Microsoft Hyper-V Synthetic Video. +config FB_SIMPLE + bool "Simple framebuffer support" + depends on (FB = y) && OF + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + Say Y if you want support for a simple frame-buffer. + + This driver assumes that the display hardware has been initialized + before the kernel boots, and the kernel will simply render to the + pre-allocated frame buffer surface. + + Configuration re: surface address, size, and format must be provided + through device tree, or potentially plain old platform data in the + future. 
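vhost_net_set_owner() now bails out with -EBUSY when the device already has an owner, using the new vhost_dev_has_owner() helper (a plain dev->mm test made under the device mutex the caller already holds). The idiom, reduced to a stand-alone sketch with invented names:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct ex_dev {
        struct mutex mutex;
        void *owner;            /* stands in for vhost_dev::mm */
};

static bool ex_dev_has_owner(struct ex_dev *d)
{
        return d->owner != NULL;        /* caller holds d->mutex */
}

static long ex_dev_set_owner(struct ex_dev *d, void *who)
{
        long r = 0;

        mutex_lock(&d->mutex);
        if (ex_dev_has_owner(d)) {
                r = -EBUSY;             /* reject a second owner up front */
                goto out;
        }
        d->owner = who;
out:
        mutex_unlock(&d->mutex);
        return r;
}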
+ source "drivers/video/omap/Kconfig" source "drivers/video/omap2/Kconfig" source "drivers/video/exynos/Kconfig" diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 7234e4a959e8..e8bae8dd4804 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -166,6 +166,7 @@ obj-$(CONFIG_FB_MX3) += mx3fb.o obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o obj-$(CONFIG_FB_MXS) += mxsfb.o obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o +obj-$(CONFIG_FB_SIMPLE) += simplefb.o # the test framebuffer is last obj-$(CONFIG_FB_VIRTUAL) += vfb.o diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 540909de6247..effdb373b8db 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -223,8 +223,14 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo) static void exit_backlight(struct atmel_lcdfb_info *sinfo) { - if (sinfo->backlight) - backlight_device_unregister(sinfo->backlight); + if (!sinfo->backlight) + return; + + if (sinfo->backlight->ops) { + sinfo->backlight->props.power = FB_BLANK_POWERDOWN; + sinfo->backlight->ops->update_status(sinfo->backlight); + } + backlight_device_unregister(sinfo->backlight); } #else @@ -461,8 +467,11 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var, if (info->fix.smem_len) { unsigned int smem_len = (var->xres_virtual * var->yres_virtual * ((var->bits_per_pixel + 7) / 8)); - if (smem_len > info->fix.smem_len) + if (smem_len > info->fix.smem_len) { + dev_err(dev, "Frame buffer is too small (%u) for screen size (need at least %u)\n", + info->fix.smem_len, smem_len); return -EINVAL; + } } /* Saturate vertical and horizontal timings at maximum values */ diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile index a862e9173ebe..48da25c96cd3 100644 --- a/drivers/video/console/Makefile +++ b/drivers/video/console/Makefile @@ -18,6 +18,8 @@ font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o font-objs += $(font-objs-y) +obj-$(CONFIG_FONTS) += font.o + # Each configuration option enables a list of files. 
obj-$(CONFIG_DUMMY_CONSOLE) += dummycon.o diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 60cc6fee6548..c9c2252e3719 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c @@ -53,6 +53,8 @@ static char *def_disp_name; module_param_named(def_disp, def_disp_name, charp, 0); MODULE_PARM_DESC(def_disp, "default display name"); +static bool dss_initialized; + const char *omapdss_get_default_display_name(void) { return core.default_display_name; @@ -66,6 +68,12 @@ enum omapdss_version omapdss_get_version(void) } EXPORT_SYMBOL(omapdss_get_version); +bool omapdss_is_initialized(void) +{ + return dss_initialized; +} +EXPORT_SYMBOL(omapdss_is_initialized); + struct platform_device *dss_get_core_pdev(void) { return core.pdev; @@ -603,6 +611,8 @@ static int __init omap_dss_init(void) return r; } + dss_initialized = true; + return 0; } @@ -633,7 +643,15 @@ static int __init omap_dss_init(void) static int __init omap_dss_init2(void) { - return omap_dss_register_drivers(); + int r; + + r = omap_dss_register_drivers(); + if (r) + return r; + + dss_initialized = true; + + return 0; } core_initcall(omap_dss_init); diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 17f4d55c621c..a109934c0478 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -1065,10 +1065,6 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev) mutex_init(&hdmi.ip_data.lock); res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0); - if (!res) { - DSSERR("can't get IORESOURCE_MEM HDMI\n"); - return -EINVAL; - } /* Base address taken from platform */ hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index c84bb8a4d0c4..856917b33616 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -2416,6 +2416,9 @@ static int omapfb_probe(struct platform_device *pdev) DBG("omapfb_probe\n"); + if (omapdss_is_initialized() == false) + return -EPROBE_DEFER; + if (pdev->num_resources != 0) { dev_err(&pdev->dev, "probed for an unknown device\n"); r = -ENODEV; diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c index 5261229c79af..f346b02eee1d 100644 --- a/drivers/video/omap2/vrfb.c +++ b/drivers/video/omap2/vrfb.c @@ -353,11 +353,6 @@ static int __init vrfb_probe(struct platform_device *pdev) /* first resource is the register res, the rest are vrfb contexts */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(&pdev->dev, "can't get vrfb base address\n"); - return -EINVAL; - } - vrfb_base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(vrfb_base)) return PTR_ERR(vrfb_base); diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c index d9f08c653d62..dbfe2c18a434 100644 --- a/drivers/video/ps3fb.c +++ b/drivers/video/ps3fb.c @@ -710,7 +710,7 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma) r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len); dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n", - info->fix.smem_start + vma->vm_pgoff << PAGE_SHIFT, + info->fix.smem_start + (vma->vm_pgoff << PAGE_SHIFT), vma->vm_start); return r; diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c new file mode 100644 index 000000000000..e2e9e3e61b72 --- /dev/null +++ b/drivers/video/simplefb.c @@ -0,0 +1,234 @@ +/* + * Simplest possible simple frame-buffer driver, as a 
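omapfb_probe() now returns -EPROBE_DEFER while omapdss has not finished registering its drivers, letting the driver core retry the probe later instead of failing for good. The general shape, assuming some readiness query exported by the dependency (omapdss_is_initialized() plays that role here; the names below are placeholders):

#include <linux/errno.h>
#include <linux/platform_device.h>

static bool ex_dependency_ready(void)
{
        return false;   /* stand-in for e.g. omapdss_is_initialized() */
}

static int ex_probe(struct platform_device *pdev)
{
        /*
         * Not a hard failure: ask the driver core to try this probe
         * again once more devices and drivers have been registered.
         */
        if (!ex_dependency_ready())
                return -EPROBE_DEFER;

        /* normal probe work goes here */
        return 0;
}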
platform device + * + * Copyright (c) 2013, Stephen Warren + * + * Based on q40fb.c, which was: + * Copyright (C) 2001 Richard Zidlicky <rz@linux-m68k.org> + * + * Also based on offb.c, which was: + * Copyright (C) 1997 Geert Uytterhoeven + * Copyright (C) 1996 Paul Mackerras + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/errno.h> +#include <linux/fb.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +static struct fb_fix_screeninfo simplefb_fix = { + .id = "simple", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, +}; + +static struct fb_var_screeninfo simplefb_var = { + .height = -1, + .width = -1, + .activate = FB_ACTIVATE_NOW, + .vmode = FB_VMODE_NONINTERLACED, +}; + +static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info) +{ + u32 *pal = info->pseudo_palette; + u32 cr = red >> (16 - info->var.red.length); + u32 cg = green >> (16 - info->var.green.length); + u32 cb = blue >> (16 - info->var.blue.length); + u32 value; + + if (regno >= 16) + return -EINVAL; + + value = (cr << info->var.red.offset) | + (cg << info->var.green.offset) | + (cb << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + mask <<= info->var.transp.offset; + value |= mask; + } + pal[regno] = value; + + return 0; +} + +static struct fb_ops simplefb_ops = { + .owner = THIS_MODULE, + .fb_setcolreg = simplefb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, +}; + +struct simplefb_format { + const char *name; + u32 bits_per_pixel; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; +}; + +static struct simplefb_format simplefb_formats[] = { + { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} }, +}; + +struct simplefb_params { + u32 width; + u32 height; + u32 stride; + struct simplefb_format *format; +}; + +static int simplefb_parse_dt(struct platform_device *pdev, + struct simplefb_params *params) +{ + struct device_node *np = pdev->dev.of_node; + int ret; + const char *format; + int i; + + ret = of_property_read_u32(np, "width", ¶ms->width); + if (ret) { + dev_err(&pdev->dev, "Can't parse width property\n"); + return ret; + } + + ret = of_property_read_u32(np, "height", ¶ms->height); + if (ret) { + dev_err(&pdev->dev, "Can't parse height property\n"); + return ret; + } + + ret = of_property_read_u32(np, "stride", ¶ms->stride); + if (ret) { + dev_err(&pdev->dev, "Can't parse stride property\n"); + return ret; + } + + ret = of_property_read_string(np, "format", &format); + if (ret) { + dev_err(&pdev->dev, "Can't parse format property\n"); + return ret; + } + params->format = NULL; + for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) { + if (strcmp(format, simplefb_formats[i].name)) + continue; + params->format = &simplefb_formats[i]; + break; + } + if (!params->format) { + dev_err(&pdev->dev, "Invalid format value\n"); + return -EINVAL; + } + + return 0; +} + +static int 
simplefb_probe(struct platform_device *pdev) +{ + int ret; + struct simplefb_params params; + struct fb_info *info; + struct resource *mem; + + if (fb_get_options("simplefb", NULL)) + return -ENODEV; + + ret = simplefb_parse_dt(pdev, ¶ms); + if (ret) + return ret; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "No memory resource\n"); + return -EINVAL; + } + + info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev); + if (!info) + return -ENOMEM; + platform_set_drvdata(pdev, info); + + info->fix = simplefb_fix; + info->fix.smem_start = mem->start; + info->fix.smem_len = resource_size(mem); + info->fix.line_length = params.stride; + + info->var = simplefb_var; + info->var.xres = params.width; + info->var.yres = params.height; + info->var.xres_virtual = params.width; + info->var.yres_virtual = params.height; + info->var.bits_per_pixel = params.format->bits_per_pixel; + info->var.red = params.format->red; + info->var.green = params.format->green; + info->var.blue = params.format->blue; + info->var.transp = params.format->transp; + + info->fbops = &simplefb_ops; + info->flags = FBINFO_DEFAULT; + info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start, + info->fix.smem_len); + if (!info->screen_base) { + framebuffer_release(info); + return -ENODEV; + } + info->pseudo_palette = (void *)(info + 1); + + ret = register_framebuffer(info); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); + framebuffer_release(info); + return ret; + } + + dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); + + return 0; +} + +static int simplefb_remove(struct platform_device *pdev) +{ + struct fb_info *info = platform_get_drvdata(pdev); + + unregister_framebuffer(info); + framebuffer_release(info); + + return 0; +} + +static const struct of_device_id simplefb_of_match[] = { + { .compatible = "simple-framebuffer", }, + { }, +}; +MODULE_DEVICE_TABLE(of, simplefb_of_match); + +static struct platform_driver simplefb_driver = { + .driver = { + .name = "simple-framebuffer", + .owner = THIS_MODULE, + .of_match_table = simplefb_of_match, + }, + .probe = simplefb_probe, + .remove = simplefb_remove, +}; +module_platform_driver(simplefb_driver); + +MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>"); +MODULE_DESCRIPTION("Simple framebuffer driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index db2390aed387..6e94d8dd3d00 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -555,11 +555,6 @@ static int omap_hdq_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hdq_data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_dbg(&pdev->dev, "unable to get resource\n"); - return -ENXIO; - } - hdq_data->hdq_base = devm_ioremap_resource(dev, res); if (IS_ERR(hdq_data->hdq_base)) return PTR_ERR(hdq_data->hdq_base); diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c index d184c48a0482..37cb09b27b63 100644 --- a/drivers/watchdog/ath79_wdt.c +++ b/drivers/watchdog/ath79_wdt.c @@ -248,11 +248,6 @@ static int ath79_wdt_probe(struct platform_device *pdev) return -EBUSY; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "no memory resource found\n"); - return -EINVAL; - } - wdt_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(wdt_base)) return PTR_ERR(wdt_base); diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 
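A whole batch of hunks (omap2 hdmi and vrfb earlier, omap_hdq, ath79_wdt, and the watchdog drivers that follow) deletes the manual NULL test on the platform resource: devm_ioremap_resource() already validates its argument, logs an error and returns an ERR_PTR, so the extra check was dead code. The resulting probe boilerplate is roughly:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static void __iomem *ex_base;

static int ex_probe(struct platform_device *pdev)
{
        struct resource *res;

        /* No NULL check needed: devm_ioremap_resource() copes with a
         * missing resource and prints its own error message. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ex_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ex_base))
                return PTR_ERR(ex_base);

        return 0;
}

The only responsibility left with the caller is the IS_ERR()/PTR_ERR() handling on the returned mapping.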
100d4fbfde2a..bead7740c86a 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -217,11 +217,6 @@ static int davinci_wdt_probe(struct platform_device *pdev) dev_info(dev, "heartbeat %d sec\n", heartbeat); wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (wdt_mem == NULL) { - dev_err(dev, "failed to get memory region resource\n"); - return -ENOENT; - } - wdt_base = devm_ioremap_resource(dev, wdt_mem); if (IS_ERR(wdt_base)) return PTR_ERR(wdt_base); diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index ff908823688c..62946c2cb4f8 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -257,11 +257,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "can't get device resources\n"); - return -ENODEV; - } - imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(imx2_wdt.base)) return PTR_ERR(imx2_wdt.base); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index dd4d9cb86243..9e02d60a364b 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -19,11 +19,10 @@ config XEN_SELFBALLOONING by the current usage of anonymous memory ("committed AS") and controlled by various sysfs-settable parameters. Configuring FRONTSWAP is highly recommended; if it is not configured, self- - ballooning is disabled by default but can be enabled with the - 'selfballooning' kernel boot parameter. If FRONTSWAP is configured, + ballooning is disabled by default. If FRONTSWAP is configured, frontswap-selfshrinking is enabled by default but can be disabled - with the 'noselfshrink' kernel boot parameter; and self-ballooning - is enabled by default but can be disabled with the 'noselfballooning' + with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning + is enabled by default but can be disabled with the 'tmem.selfballooning=0' kernel boot parameter. Note that systems without a sufficiently large swap device should not enable self-ballooning. 
@@ -141,7 +140,7 @@ config XEN_GRANT_DEV_ALLOC config SWIOTLB_XEN def_bool y - depends on PCI + depends on PCI && X86 select SWIOTLB config XEN_TMEM diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index a56776dbe095..930fb6817901 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -407,7 +407,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { - if ((page = alloc_page(gfp)) == NULL) { + page = alloc_page(gfp); + if (page == NULL) { nr_pages = i; state = BP_EAGAIN; break; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index d8cc8127f19c..6a6bbe4ede92 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -167,6 +167,8 @@ static void xen_irq_info_common_init(struct irq_info *info, info->cpu = cpu; evtchn_to_irq[evtchn] = irq; + + irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); } static void xen_irq_info_evtchn_init(unsigned irq, @@ -874,7 +876,6 @@ int bind_evtchn_to_irq(unsigned int evtchn) struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); } - irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); out: mutex_unlock(&irq_mapping_update_lock); diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index ca2b00e9d558..2cfc24d76fc5 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -504,7 +504,7 @@ static void privcmd_close(struct vm_area_struct *vma) struct page **pages = vma->vm_private_data; int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages)) + if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) return; xen_unmap_domain_mfn_range(vma, numpgs, pages); diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index e3600be4e7fa..0f0493c63371 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c @@ -11,11 +11,7 @@ #include <linux/init.h> #include <linux/pagemap.h> #include <linux/cleancache.h> - -/* temporary ifdef until include/linux/frontswap.h is upstream */ -#ifdef CONFIG_FRONTSWAP #include <linux/frontswap.h> -#endif #include <xen/xen.h> #include <xen/interface/xen.h> @@ -24,6 +20,36 @@ #include <asm/xen/hypervisor.h> #include <xen/tmem.h> +#ifndef CONFIG_XEN_TMEM_MODULE +bool __read_mostly tmem_enabled = false; + +static int __init enable_tmem(char *s) +{ + tmem_enabled = true; + return 1; +} +__setup("tmem", enable_tmem); +#endif + +#ifdef CONFIG_CLEANCACHE +static bool cleancache __read_mostly = true; +module_param(cleancache, bool, S_IRUGO); +static bool selfballooning __read_mostly = true; +module_param(selfballooning, bool, S_IRUGO); +#endif /* CONFIG_CLEANCACHE */ + +#ifdef CONFIG_FRONTSWAP +static bool frontswap __read_mostly = true; +module_param(frontswap, bool, S_IRUGO); +#else /* CONFIG_FRONTSWAP */ +#define frontswap (0) +#endif /* CONFIG_FRONTSWAP */ + +#ifdef CONFIG_XEN_SELFBALLOONING +static bool selfshrinking __read_mostly = true; +module_param(selfshrinking, bool, S_IRUGO); +#endif /* CONFIG_XEN_SELFBALLOONING */ + #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 @@ -129,16 +155,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid) return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); } -#ifndef CONFIG_XEN_TMEM_MODULE -bool __read_mostly tmem_enabled = false; - -static int __init enable_tmem(char *s) -{ - tmem_enabled = true; - return 1; -} -__setup("tmem", enable_tmem); -#endif #ifdef CONFIG_CLEANCACHE 
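The privcmd change above is a small but instructive bug class: the closing parenthesis of xen_feature() sat in the wrong place, so the whole condition was OR-ed together inside the feature-flag argument and the intended sanity checks on numpgs and pages were never evaluated on their own. Restated side by side (numpgs and pages are the locals of privcmd_close):

/* Before: the || happens inside the function argument. */
if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages))
        return;

/* After: the feature is tested by itself, then the sanity checks. */
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
        return;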
static int xen_tmem_destroy_pool(u32 pool_id) @@ -230,20 +246,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize) return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); } -static bool disable_cleancache __read_mostly; -static bool disable_selfballooning __read_mostly; -#ifdef CONFIG_XEN_TMEM_MODULE -module_param(disable_cleancache, bool, S_IRUGO); -module_param(disable_selfballooning, bool, S_IRUGO); -#else -static int __init no_cleancache(char *s) -{ - disable_cleancache = true; - return 1; -} -__setup("nocleancache", no_cleancache); -#endif - static struct cleancache_ops tmem_cleancache_ops = { .put_page = tmem_cleancache_put_page, .get_page = tmem_cleancache_get_page, @@ -361,20 +363,6 @@ static void tmem_frontswap_init(unsigned ignored) xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE); } -static bool disable_frontswap __read_mostly; -static bool disable_frontswap_selfshrinking __read_mostly; -#ifdef CONFIG_XEN_TMEM_MODULE -module_param(disable_frontswap, bool, S_IRUGO); -module_param(disable_frontswap_selfshrinking, bool, S_IRUGO); -#else -static int __init no_frontswap(char *s) -{ - disable_frontswap = true; - return 1; -} -__setup("nofrontswap", no_frontswap); -#endif - static struct frontswap_ops tmem_frontswap_ops = { .store = tmem_frontswap_store, .load = tmem_frontswap_load, @@ -382,8 +370,6 @@ static struct frontswap_ops tmem_frontswap_ops = { .invalidate_area = tmem_frontswap_flush_area, .init = tmem_frontswap_init }; -#else /* CONFIG_FRONTSWAP */ -#define disable_frontswap_selfshrinking 1 #endif static int xen_tmem_init(void) @@ -391,12 +377,12 @@ static int xen_tmem_init(void) if (!xen_domain()) return 0; #ifdef CONFIG_FRONTSWAP - if (tmem_enabled && !disable_frontswap) { + if (tmem_enabled && frontswap) { char *s = ""; - struct frontswap_ops *old_ops = - frontswap_register_ops(&tmem_frontswap_ops); + struct frontswap_ops *old_ops; tmem_frontswap_poolid = -1; + old_ops = frontswap_register_ops(&tmem_frontswap_ops); if (IS_ERR(old_ops) || old_ops) { if (IS_ERR(old_ops)) return PTR_ERR(old_ops); @@ -408,7 +394,7 @@ static int xen_tmem_init(void) #endif #ifdef CONFIG_CLEANCACHE BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); - if (tmem_enabled && !disable_cleancache) { + if (tmem_enabled && cleancache) { char *s = ""; struct cleancache_ops *old_ops = cleancache_register_ops(&tmem_cleancache_ops); @@ -419,8 +405,15 @@ static int xen_tmem_init(void) } #endif #ifdef CONFIG_XEN_SELFBALLOONING - xen_selfballoon_init(!disable_selfballooning, - !disable_frontswap_selfshrinking); + /* + * There is no point of driving pages to the swap system if they + * aren't going anywhere in tmem universe. 
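The tmem rework replaces the scattered negative __setup() boot options (nocleancache, nofrontswap, noselfshrink) with positive-sense, read-only module parameters, so the same switches work whether tmem is built in or modular and can be given as tmem.cleancache=0, tmem.frontswap=0 and so on. A minimal sketch of that parameter style; the init function and message are illustrative:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Readable in sysfs, settable on the kernel command line as
 * <modname>.cleancache=0 or via modprobe options. */
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);

static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);

static int __init ex_init(void)
{
        if (!frontswap)
                pr_info("frontswap disabled by parameter\n");
        return 0;
}
module_init(ex_init);
MODULE_LICENSE("GPL");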
+ */ + if (!frontswap) { + selfshrinking = false; + selfballooning = false; + } + xen_selfballoon_init(selfballooning, selfshrinking); #endif return 0; } diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index a2278ba7fb27..4e8ba38aa0c9 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -106,7 +106,7 @@ static void pcistub_device_release(struct kref *kref) else pci_restore_state(dev); - if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { + if (dev->msix_cap) { struct physdev_pci_device ppdev = { .seg = pci_domain_nr(dev->bus), .bus = dev->bus->number, @@ -371,7 +371,7 @@ static int pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; - if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { + if (dev->msix_cap) { struct physdev_pci_device ppdev = { .seg = pci_domain_nr(dev->bus), .bus = dev->bus->number, diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index f2ef569c7cc1..f70984a892aa 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c @@ -53,15 +53,12 @@ * System configuration note: Selfballooning should not be enabled on * systems without a sufficiently large swap device configured; for best * results, it is recommended that total swap be increased by the size - * of the guest memory. Also, while technically not required to be - * configured, it is highly recommended that frontswap also be configured - * and enabled when selfballooning is running. So, selfballooning - * is disabled by default if frontswap is not configured and can only - * be enabled with the "selfballooning" kernel boot option; similarly - * selfballooning is enabled by default if frontswap is configured and - * can be disabled with the "noselfballooning" kernel boot option. Finally, - * when frontswap is configured, frontswap-selfshrinking can be disabled - * with the "noselfshrink" kernel boot option. + * of the guest memory. Note, that selfballooning should be disabled by default + * if frontswap is not configured. Similarly selfballooning should be enabled + * by default if frontswap is configured and can be disabled with the + * "tmem.selfballooning=0" kernel boot option. Finally, when frontswap is + * configured, frontswap-selfshrinking can be disabled with the + * "tmem.selfshrink=0" kernel boot option. * * Selfballooning is disallowed in domain0 and force-disabled. * @@ -120,9 +117,6 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process); /* Enable/disable with sysfs. */ static bool frontswap_selfshrinking __read_mostly; -/* Enable/disable with kernel boot option. */ -static bool use_frontswap_selfshrink = true; - /* * The default values for the following parameters were deemed reasonable * by experimentation, may be workload-dependent, and can all be @@ -176,35 +170,6 @@ static void frontswap_selfshrink(void) frontswap_shrink(tgt_frontswap_pages); } -static int __init xen_nofrontswap_selfshrink_setup(char *s) -{ - use_frontswap_selfshrink = false; - return 1; -} - -__setup("noselfshrink", xen_nofrontswap_selfshrink_setup); - -/* Disable with kernel boot option. */ -static bool use_selfballooning = true; - -static int __init xen_noselfballooning_setup(char *s) -{ - use_selfballooning = false; - return 1; -} - -__setup("noselfballooning", xen_noselfballooning_setup); -#else /* !CONFIG_FRONTSWAP */ -/* Enable with kernel boot option. 
*/ -static bool use_selfballooning; - -static int __init xen_selfballooning_setup(char *s) -{ - use_selfballooning = true; - return 1; -} - -__setup("selfballooning", xen_selfballooning_setup); #endif /* CONFIG_FRONTSWAP */ #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 61786be9138b..ec097d6f964d 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -534,7 +534,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr); if (err) - goto out_err; + goto out_err_free_ballooned_pages; spin_lock(&xenbus_valloc_lock); list_add(&node->next, &xenbus_valloc_pages); @@ -543,8 +543,9 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, *vaddr = addr; return 0; - out_err: + out_err_free_ballooned_pages: free_xenballooned_pages(1, &node->page); + out_err: kfree(node); return err; } diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h index c8abd3b8a6c4..e74f9c1fbd80 100644 --- a/drivers/xen/xenbus/xenbus_comms.h +++ b/drivers/xen/xenbus/xenbus_comms.h @@ -45,6 +45,7 @@ int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; +extern enum xenstore_init xen_store_domain_type; extern const struct file_operations xen_xenbus_fops; diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c index d73000800762..a6f42fc01407 100644 --- a/drivers/xen/xenbus/xenbus_dev_backend.c +++ b/drivers/xen/xenbus/xenbus_dev_backend.c @@ -70,22 +70,21 @@ static long xenbus_alloc(domid_t domid) return err; } -static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) +static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, + unsigned long data) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { - case IOCTL_XENBUS_BACKEND_EVTCHN: - if (xen_store_evtchn > 0) - return xen_store_evtchn; - return -ENODEV; - - case IOCTL_XENBUS_BACKEND_SETUP: - return xenbus_alloc(data); - - default: - return -ENOTTY; + case IOCTL_XENBUS_BACKEND_EVTCHN: + if (xen_store_evtchn > 0) + return xen_store_evtchn; + return -ENODEV; + case IOCTL_XENBUS_BACKEND_SETUP: + return xenbus_alloc(data); + default: + return -ENOTTY; } } diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 3325884c693f..56cfaaa9d006 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -69,6 +69,9 @@ EXPORT_SYMBOL_GPL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; EXPORT_SYMBOL_GPL(xen_store_interface); +enum xenstore_init xen_store_domain_type; +EXPORT_SYMBOL_GPL(xen_store_domain_type); + static unsigned long xen_store_mfn; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); @@ -719,17 +722,11 @@ static int __init xenstored_local_init(void) return err; } -enum xenstore_init { - UNKNOWN, - PV, - HVM, - LOCAL, -}; static int __init xenbus_init(void) { int err = 0; - enum xenstore_init usage = UNKNOWN; uint64_t v = 0; + xen_store_domain_type = XS_UNKNOWN; if (!xen_domain()) return -ENODEV; @@ -737,29 +734,29 @@ static int __init xenbus_init(void) xenbus_ring_ops_init(); if (xen_pv_domain()) - usage = PV; + xen_store_domain_type = XS_PV; if (xen_hvm_domain()) - usage = HVM; + xen_store_domain_type = XS_HVM; if (xen_hvm_domain() && xen_initial_domain()) - usage = LOCAL; 
+ xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && !xen_start_info->store_evtchn) - usage = LOCAL; + xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && xen_start_info->store_evtchn) xenstored_ready = 1; - switch (usage) { - case LOCAL: + switch (xen_store_domain_type) { + case XS_LOCAL: err = xenstored_local_init(); if (err) goto out_error; xen_store_interface = mfn_to_virt(xen_store_mfn); break; - case PV: + case XS_PV: xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); break; - case HVM: + case XS_HVM: err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto out_error; diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h index bb4f92ed8730..146f857a36f8 100644 --- a/drivers/xen/xenbus/xenbus_probe.h +++ b/drivers/xen/xenbus/xenbus_probe.h @@ -47,6 +47,13 @@ struct xen_bus_type { struct bus_type bus; }; +enum xenstore_init { + XS_UNKNOWN, + XS_PV, + XS_HVM, + XS_LOCAL, +}; + extern struct device_attribute xenbus_dev_attrs[]; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 3159a37d966d..a7e25073de19 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -29,6 +29,8 @@ #include "xenbus_probe.h" +static struct workqueue_struct *xenbus_frontend_wq; + /* device/<type>/<id> => <type>-<id> */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { @@ -89,9 +91,40 @@ static void backend_changed(struct xenbus_watch *watch, xenbus_otherend_changed(watch, vec, len, 1); } +static void xenbus_frontend_delayed_resume(struct work_struct *w) +{ + struct xenbus_device *xdev = container_of(w, struct xenbus_device, work); + + xenbus_dev_resume(&xdev->dev); +} + +static int xenbus_frontend_dev_resume(struct device *dev) +{ + /* + * If xenstored is running in this domain, we cannot access the backend + * state at the moment, so we need to defer xenbus_dev_resume + */ + if (xen_store_domain_type == XS_LOCAL) { + struct xenbus_device *xdev = to_xenbus_device(dev); + + if (!xenbus_frontend_wq) { + pr_err("%s: no workqueue to process delayed resume\n", + xdev->nodename); + return -EFAULT; + } + + INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume); + queue_work(xenbus_frontend_wq, &xdev->work); + + return 0; + } + + return xenbus_dev_resume(dev); +} + static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, - .resume = xenbus_dev_resume, + .resume = xenbus_frontend_dev_resume, .freeze = xenbus_dev_suspend, .thaw = xenbus_dev_cancel, .restore = xenbus_dev_resume, @@ -440,6 +473,8 @@ static int __init xenbus_probe_frontend_init(void) register_xenstore_notifier(&xenstore_notifier); + xenbus_frontend_wq = create_workqueue("xenbus_frontend"); + return 0; } subsys_initcall(xenbus_probe_frontend_init); |
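
A note on the tmem option rework above: the deleted "nocleancache", "nofrontswap", "noselfballooning" and "noselfshrink" __setup() handlers and the disable_* module parameters are replaced by positive flags (frontswap, cleancache, selfballooning, selfshrinking) that xen_tmem_init() and xen_selfballoon_init() now consume directly. The updated xen-selfballoon.c comment gives the new spellings "tmem.selfballooning=0" and "tmem.selfshrink=0"; assuming the cleancache and frontswap switches follow the same "tmem." prefix (their declarations sit outside the hunks shown here), a boot line disabling everything would look roughly like:

    tmem.cleancache=0 tmem.frontswap=0 tmem.selfballooning=0 tmem.selfshrink=0

Only the selfballooning and selfshrink spellings appear verbatim in this diff; the other two names are an assumption.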
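
The xenbus_client.c hunk splits the single cleanup label in xenbus_map_ring_valloc_hvm() so that each failure point unwinds only what was actually set up: a failure in xenbus_map_ring() now frees the ballooned page before freeing the node, while earlier failures skip straight to the kfree(). A minimal, self-contained sketch of that label-per-step idiom, using made-up demo_* names and plain malloc()/free() in place of the real xenbus helpers:

    /*
     * Illustration of the split-cleanup-label idiom: each error label
     * undoes the steps completed so far, in reverse order.  All names
     * here are hypothetical.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_node {
        void *page;     /* stands in for the ballooned page */
        void *mapping;  /* stands in for the mapped ring    */
    };

    static struct demo_node *demo_setup(void)
    {
        struct demo_node *node = malloc(sizeof(*node));

        if (!node)
            return NULL;

        node->page = malloc(4096);          /* step 1 */
        if (!node->page)
            goto out_err;                   /* only the node to undo */

        node->mapping = malloc(64);         /* step 2 */
        if (!node->mapping)
            goto out_err_free_page;         /* undo step 1, then the node */

        return node;

    out_err_free_page:
        free(node->page);
    out_err:
        free(node);
        return NULL;
    }

    int main(void)
    {
        struct demo_node *node = demo_setup();

        if (!node)
            return 1;
        printf("setup succeeded\n");
        free(node->mapping);
        free(node->page);
        free(node);
        return 0;
    }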
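
The xenbus_probe_frontend.c change defers the real resume work to a dedicated "xenbus_frontend" workqueue when xenstored runs in the same domain (xen_store_domain_type == XS_LOCAL), since the backend state in xenstore cannot be read at that point in the resume sequence. The sketch below shows the same defer-to-a-workqueue shape as a standalone module; every demo_* name is hypothetical and only mirrors the structure of xenbus_frontend_dev_resume() and xenbus_frontend_delayed_resume(), not the real xenbus types:

    /*
     * Minimal sketch of the defer-to-workqueue pattern: embed a
     * work_struct in the device and hand it to a dedicated workqueue
     * instead of doing the work inline.  All demo_* names are
     * hypothetical.
     */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    struct demo_dev {
        const char *name;
        struct work_struct work;    /* deferred-resume work item */
    };

    static struct workqueue_struct *demo_wq;

    /* Runs later, in workqueue context. */
    static void demo_delayed_resume(struct work_struct *w)
    {
        struct demo_dev *d = container_of(w, struct demo_dev, work);

        pr_info("demo: resuming %s from workqueue context\n", d->name);
    }

    /* Called from the resume path; defers instead of resuming inline. */
    static int demo_dev_resume(struct demo_dev *d)
    {
        if (!demo_wq)
            return -EFAULT;    /* same error the patch returns */

        INIT_WORK(&d->work, demo_delayed_resume);
        queue_work(demo_wq, &d->work);
        return 0;
    }

    static struct demo_dev demo = { .name = "demo0" };

    static int __init demo_init(void)
    {
        demo_wq = create_workqueue("demo_frontend");
        if (!demo_wq)
            return -ENOMEM;
        return demo_dev_resume(&demo);
    }

    static void __exit demo_exit(void)
    {
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");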