Diffstat (limited to 'drivers'): 223 files changed, 3094 insertions, 2259 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 92ed9692c47e..4bf68c8d4797 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD config ACPI_BGRT bool "Boottime Graphics Resource Table support" - depends on EFI + depends on EFI && X86 help This driver adds support for exposing the ACPI Boottime Graphics Resource Table, which allows the operating system to obtain diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c index 82045e3f5cac..a82c7626aa9b 100644 --- a/drivers/acpi/acpi_i2c.c +++ b/drivers/acpi/acpi_i2c.c @@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter) acpi_handle handle; acpi_status status; - handle = ACPI_HANDLE(&adapter->dev); + handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 5ff173066127..6ae5e440436e 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -415,7 +415,6 @@ static int acpi_pci_root_add(struct acpi_device *device, struct acpi_pci_root *root; struct acpi_pci_driver *driver; u32 flags, base_flags; - bool is_osc_granted = false; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) @@ -476,6 +475,30 @@ static int acpi_pci_root_add(struct acpi_device *device, flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; acpi_pci_osc_support(root, flags); + /* + * TBD: Need PCI interface for enumeration/configuration of roots. + */ + + mutex_lock(&acpi_pci_root_lock); + list_add_tail(&root->node, &acpi_pci_roots); + mutex_unlock(&acpi_pci_root_lock); + + /* + * Scan the Root Bridge + * -------------------- + * Must do this prior to any attempt to bind the root device, as the + * PCI namespace does not get created until this call is made (and + * thus the root bridge's pci_dev does not exist). + */ + root->bus = pci_acpi_scan_root(root); + if (!root->bus) { + printk(KERN_ERR PREFIX + "Bus %04x:%02x not present in PCI namespace\n", + root->segment, (unsigned int)root->secondary.start); + result = -ENODEV; + goto out_del_root; + } + /* Indicate support for various _OSC capabilities. */ if (pci_ext_cfg_avail()) flags |= OSC_EXT_PCI_CONFIG_SUPPORT; @@ -494,6 +517,7 @@ static int acpi_pci_root_add(struct acpi_device *device, flags = base_flags; } } + if (!pcie_ports_disabled && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL @@ -514,54 +538,28 @@ static int acpi_pci_root_add(struct acpi_device *device, status = acpi_pci_osc_control_set(device->handle, &flags, OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); if (ACPI_SUCCESS(status)) { - is_osc_granted = true; dev_info(&device->dev, "ACPI _OSC control (0x%02x) granted\n", flags); + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { + /* + * We have ASPM control, but the FADT indicates + * that it's unsupported. Clear it. + */ + pcie_clear_aspm(root->bus); + } } else { - is_osc_granted = false; dev_info(&device->dev, "ACPI _OSC request failed (%s), " "returned control mask: 0x%02x\n", acpi_format_exception(status), flags); + pr_info("ACPI _OSC control for PCIe not granted, " + "disabling ASPM\n"); + pcie_no_aspm(); } } else { dev_info(&device->dev, - "Unable to request _OSC control " - "(_OSC support mask: 0x%02x)\n", flags); - } - - /* - * TBD: Need PCI interface for enumeration/configuration of roots. 
- */ - - mutex_lock(&acpi_pci_root_lock); - list_add_tail(&root->node, &acpi_pci_roots); - mutex_unlock(&acpi_pci_root_lock); - - /* - * Scan the Root Bridge - * -------------------- - * Must do this prior to any attempt to bind the root device, as the - * PCI namespace does not get created until this call is made (and - * thus the root bridge's pci_dev does not exist). - */ - root->bus = pci_acpi_scan_root(root); - if (!root->bus) { - printk(KERN_ERR PREFIX - "Bus %04x:%02x not present in PCI namespace\n", - root->segment, (unsigned int)root->secondary.start); - result = -ENODEV; - goto out_del_root; - } - - /* ASPM setting */ - if (is_osc_granted) { - if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) - pcie_clear_aspm(root->bus); - } else { - pr_info("ACPI _OSC control for PCIe not granted, " - "disabling ASPM\n"); - pcie_no_aspm(); + "Unable to request _OSC control " + "(_OSC support mask: 0x%02x)\n", flags); } pci_acpi_add_bus_pm_notifier(device, root->bus); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fc95308e9a11..ee255c60bdac 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644); static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); -static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; +static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], + acpi_cstate); static int disabled_by_idle_boot_param(void) { @@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); pr = __this_cpu_read(processors); @@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, */ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) { - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); ACPI_FLUSH_CPU_CACHE(); @@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); pr = __this_cpu_read(processors); @@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); pr = __this_cpu_read(processors); @@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) continue; #endif - acpi_cstate[count] = cx; + per_cpu(acpi_cstate[count], dev->cpu) = cx; count++; if (count == CPUIDLE_STATE_MAX) diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index ffdd32d22602..2f48123d74c4 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -150,6 +150,7 @@ enum piix_controller_ids { tolapai_sata, piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ ich8_sata_snb, + ich8_2port_sata_snb, }; struct piix_map_db { @@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = { /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point) */ - { 0x8086, 
0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point-LP) */ @@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = { [ich8m_apple_sata] = &ich8m_apple_map_db, [tolapai_sata] = &tolapai_map_db, [ich8_sata_snb] = &ich8_map_db, + [ich8_2port_sata_snb] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { @@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &piix_sata_ops, }, + + [ich8_2port_sata_snb] = + { + .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR + | PIIX_FLAG_PIO16, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &piix_sata_ops, + }, }; #define AHCI_PCI_BAR 5 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 497adea1f0d6..63c743baf920 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev) * from SATA Settings page of Identify Device Data Log. */ if (ata_id_has_devslp(dev->id)) { - u8 sata_setting[ATA_SECT_SIZE]; + u8 *sata_setting = ap->sector_buf; int i, j; dev->flags |= ATA_DFLAG_DEVSLP; @@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) + dev->max_sectors = ATA_MAX_SECTORS_LBA48; + if (ap->ops->dev_config) ap->ops->dev_config(dev); @@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, + { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, /* Devices we expect to fail diagnostics */ diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 318b41358187..ff44787e5a45 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == 0 && - sshdr.asc == 0 && sshdr.ascq == 0) + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } @@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == 0 && - sshdr.asc == 0 && sshdr.ascq == 0) + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5f74587ef258..71671c42ef45 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -46,6 +46,7 @@ #include "power.h" static DEFINE_MUTEX(dev_pm_qos_mtx); +static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); @@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev) struct pm_qos_constraints *c; struct pm_qos_flags *f; - mutex_lock(&dev_pm_qos_mtx); + mutex_lock(&dev_pm_qos_sysfs_mtx); /* * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user 
space, they have to be hidden at this point. */ + pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_flags(dev); + + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_latency_limit(dev); __dev_pm_qos_hide_flags(dev); @@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) out: mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); } /** @@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, kfree(req); } +static void dev_pm_qos_drop_user_request(struct device *dev, + enum dev_pm_qos_req_type type) +{ + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_drop_user_request(dev, type); + mutex_unlock(&dev_pm_qos_mtx); +} + /** * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. * @dev: Device whose PM QoS latency limit is to be exposed to user space. @@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) return ret; } + mutex_lock(&dev_pm_qos_sysfs_mtx); + mutex_lock(&dev_pm_qos_mtx); if (IS_ERR_OR_NULL(dev->power.qos)) @@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (ret < 0) { __dev_pm_qos_remove_request(req); kfree(req); + mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->latency_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + ret = pm_qos_sysfs_add_latency(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); out: - mutex_unlock(&dev_pm_qos_mtx); + mutex_unlock(&dev_pm_qos_sysfs_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); static void __dev_pm_qos_hide_latency_limit(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { - pm_qos_sysfs_remove_latency(dev); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); - } } /** @@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev) */ void dev_pm_qos_hide_latency_limit(struct device *dev) { + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_latency(dev); + mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_latency_limit(dev); mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); @@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) } pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + mutex_lock(&dev_pm_qos_mtx); if (IS_ERR_OR_NULL(dev->power.qos)) @@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) if (ret < 0) { __dev_pm_qos_remove_request(req); kfree(req); + mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->flags_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + ret = pm_qos_sysfs_add_flags(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); out: - mutex_unlock(&dev_pm_qos_mtx); + mutex_unlock(&dev_pm_qos_sysfs_mtx); pm_runtime_put(dev); return ret; } @@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); static void __dev_pm_qos_hide_flags(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { - pm_qos_sysfs_remove_flags(dev); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); - } } /** @@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev) void dev_pm_qos_hide_flags(struct 
device *dev) { pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_flags(dev); + mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_flags(dev); mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); pm_runtime_put(dev); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e6732cf7c06e..79f4fca9877a 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, base = 0; if (max < rbnode->base_reg + rbnode->blklen) - end = rbnode->base_reg + rbnode->blklen - max; + end = max - rbnode->base_reg + 1; else end = rbnode->blklen; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 3d2367501fd0..58cfb3232428 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -710,12 +710,12 @@ skip_format_initialization: } } + regmap_debugfs_init(map, config->name); + ret = regcache_init(map, config); if (ret != 0) goto err_range; - regmap_debugfs_init(map, config->name); - /* Add a devres resource for dev_get_regmap() */ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); if (!m) { @@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, kfree(async->work_buf); kfree(async); } + + return ret; } trace_regmap_hw_write_start(map->dev, reg, diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 25ef5c014fca..92b6d7c51e39 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -51,8 +51,9 @@ new_skb(ulong len) { struct sk_buff *skb; - skb = alloc_skb(len, GFP_ATOMIC); + skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); if (skb) { + skb_reserve(skb, MAX_HEADER); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index fe5f6403417f..dfe758382eaf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, lo->lo_flags |= LO_FLAGS_PARTSCAN; if (lo->lo_flags & LO_FLAGS_PARTSCAN) ioctl_by_bdev(bdev, BLKRRPART, 0); + + /* Grab the block_device to prevent its destruction after we + * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). + */ + bdgrab(bdev); return 0; out_clr: @@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo) memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); - if (bdev) + if (bdev) { + bdput(bdev); invalidate_bdev(bdev); + } set_capacity(lo->lo_disk, 0); loop_sysfs_exit(lo); if (bdev) { @@ -1044,29 +1051,12 @@ static int loop_clr_fd(struct loop_device *lo) lo->lo_state = Lo_unbound; /* This is safe: open() is still holding a reference. 
*/ module_put(THIS_MODULE); + if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) + ioctl_by_bdev(bdev, BLKRRPART, 0); lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; mutex_unlock(&lo->lo_ctl_mutex); - - /* - * Remove all partitions, since BLKRRPART won't remove user - * added partitions when max_part=0 - */ - if (bdev) { - struct disk_part_iter piter; - struct hd_struct *part; - - mutex_lock_nested(&bdev->bd_mutex, 1); - invalidate_partition(bdev->bd_disk, 0); - disk_part_iter_init(&piter, bdev->bd_disk, - DISK_PITER_INCL_EMPTY); - while ((part = disk_part_iter_next(&piter))) - delete_partition(bdev->bd_disk, part->partno); - disk_part_iter_exit(&piter); - mutex_unlock(&bdev->bd_mutex); - } - /* * Need not hold lo_ctl_mutex to fput backing file. * Calling fput holding lo_ctl_mutex triggers a circular diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 92250af84e7d..32c678028e53 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -81,12 +81,17 @@ /* Device instance number, incremented each time a device is probed. */ static int instance; +struct list_head online_list; +struct list_head removing_list; +spinlock_t dev_lock; + /* * Global variable used to hold the major block device number * allocated in mtip_init(). */ static int mtip_major; static struct dentry *dfs_parent; +static struct dentry *dfs_device_status; static u32 cpu_use[NR_CPUS]; @@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag) /* * Reset the HBA (without sleeping) * - * Just like hba_reset, except does not call sleep, so can be - * run from interrupt/tasklet context. - * * @dd Pointer to the driver data structure. * * return value * 0 The reset was successful. * -1 The HBA Reset bit did not clear. */ -static int hba_reset_nosleep(struct driver_data *dd) +static int mtip_hba_reset(struct driver_data *dd) { unsigned long timeout; - /* Chip quirk: quiesce any chip function */ - mdelay(10); - /* Set the reset bit */ writel(HOST_RESET, dd->mmio + HOST_CTL); /* Flush */ readl(dd->mmio + HOST_CTL); - /* - * Wait 10ms then spin for up to 1 second - * waiting for reset acknowledgement - */ - timeout = jiffies + msecs_to_jiffies(1000); - mdelay(10); - while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) - && time_before(jiffies, timeout)) - mdelay(1); + /* Spin for up to 2 seconds, waiting for reset acknowledgement */ + timeout = jiffies + msecs_to_jiffies(2000); + do { + mdelay(10); + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) + return -1; - if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) - return -1; + } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) + && time_before(jiffies, timeout)); if (readl(dd->mmio + HOST_CTL) & HOST_RESET) return -1; @@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port) dev_warn(&port->dd->pdev->dev, "PxCMD.CR not clear, escalating reset\n"); - if (hba_reset_nosleep(port->dd)) + if (mtip_hba_reset(port->dd)) dev_err(&port->dd->pdev->dev, "HBA reset escalation failed.\n"); @@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port) } +static int mtip_device_reset(struct driver_data *dd) +{ + int rv = 0; + + if (mtip_check_surprise_removal(dd->pdev)) + return 0; + + if (mtip_hba_reset(dd) < 0) + rv = -EFAULT; + + mdelay(1); + mtip_init_port(dd->port); + mtip_start_port(dd->port); + + /* Enable interrupts on the HBA. 
*/ + writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, + dd->mmio + HOST_CTL); + return rv; +} + /* * Helper function for tag logging */ @@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data) if (cmdto_cnt) { print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { - mtip_restart_port(port); + mtip_device_reset(port->dd); wake_up_interruptible(&port->svc_wait); } clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); @@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, int rv = 0, ready2go = 1; struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; unsigned long to; + struct driver_data *dd = port->dd; /* Make sure the buffer is 8 byte aligned. This is asic specific. */ if (buffer & 0x00000007) { - dev_err(&port->dd->pdev->dev, - "SG buffer is not 8 byte aligned\n"); + dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n"); return -EFAULT; } @@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port, mdelay(100); } while (time_before(jiffies, to)); if (!ready2go) { - dev_warn(&port->dd->pdev->dev, + dev_warn(&dd->pdev->dev, "Internal cmd active. new cmd [%02X]\n", fis->command); return -EBUSY; } set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); port->ic_pause_timer = 0; - if (fis->command == ATA_CMD_SEC_ERASE_UNIT) - clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); - else if (fis->command == ATA_CMD_DOWNLOAD_MICRO) - clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); + clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); + clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); if (atomic == GFP_KERNEL) { if (fis->command != ATA_CMD_STANDBYNOW1) { /* wait for io to complete if non atomic */ if (mtip_quiesce_io(port, 5000) < 0) { - dev_warn(&port->dd->pdev->dev, + dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n"); release_slot(port, MTIP_TAG_INTERNAL); clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); @@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port, /* Issue the command to the hardware */ mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); - /* Poll if atomic, wait_for_completion otherwise */ if (atomic == GFP_KERNEL) { /* Wait for the command to complete or timeout. 
*/ - if (wait_for_completion_timeout( + if (wait_for_completion_interruptible_timeout( &wait, - msecs_to_jiffies(timeout)) == 0) { - dev_err(&port->dd->pdev->dev, - "Internal command did not complete [%d] " - "within timeout of %lu ms\n", - atomic, timeout); - if (mtip_check_surprise_removal(port->dd->pdev) || + msecs_to_jiffies(timeout)) <= 0) { + if (rv == -ERESTARTSYS) { /* interrupted */ + dev_err(&dd->pdev->dev, + "Internal command [%02X] was interrupted after %lu ms\n", + fis->command, timeout); + rv = -EINTR; + goto exec_ic_exit; + } else if (rv == 0) /* timeout */ + dev_err(&dd->pdev->dev, + "Internal command did not complete [%02X] within timeout of %lu ms\n", + fis->command, timeout); + else + dev_err(&dd->pdev->dev, + "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", + fis->command, rv, timeout); + + if (mtip_check_surprise_removal(dd->pdev) || test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &port->dd->dd_flag)) { + &dd->dd_flag)) { + dev_err(&dd->pdev->dev, + "Internal command [%02X] wait returned due to SR\n", + fis->command); rv = -ENXIO; goto exec_ic_exit; } + mtip_device_reset(dd); /* recover from timeout issue */ rv = -EAGAIN; + goto exec_ic_exit; } } else { + u32 hba_stat, port_stat; + /* Spin for <timeout> checking if command still outstanding */ timeout = jiffies + msecs_to_jiffies(timeout); while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & (1 << MTIP_TAG_INTERNAL)) && time_before(jiffies, timeout)) { - if (mtip_check_surprise_removal(port->dd->pdev)) { + if (mtip_check_surprise_removal(dd->pdev)) { rv = -ENXIO; goto exec_ic_exit; } if ((fis->command != ATA_CMD_STANDBYNOW1) && test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &port->dd->dd_flag)) { + &dd->dd_flag)) { rv = -ENXIO; goto exec_ic_exit; } - if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { - atomic_inc(&int_cmd->active); /* error */ - break; + port_stat = readl(port->mmio + PORT_IRQ_STAT); + if (!port_stat) + continue; + + if (port_stat & PORT_IRQ_ERR) { + dev_err(&dd->pdev->dev, + "Internal command [%02X] failed\n", + fis->command); + mtip_device_reset(dd); + rv = -EIO; + goto exec_ic_exit; + } else { + writel(port_stat, port->mmio + PORT_IRQ_STAT); + hba_stat = readl(dd->mmio + HOST_IRQ_STAT); + if (hba_stat) + writel(hba_stat, + dd->mmio + HOST_IRQ_STAT); } + break; } } - if (atomic_read(&int_cmd->active) > 1) { - dev_err(&port->dd->pdev->dev, - "Internal command [%02X] failed\n", fis->command); - rv = -EIO; - } if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & (1 << MTIP_TAG_INTERNAL)) { rv = -ENXIO; - if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &port->dd->dd_flag)) { - mtip_restart_port(port); + if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { + mtip_device_reset(dd); rv = -EAGAIN; } } @@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, * -EINVAL Invalid parameters passed in, trim not supported * -EIO Error submitting trim request to hw */ -static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) +static int mtip_send_trim(struct driver_data *dd, unsigned int lba, + unsigned int len) { int i, rv = 0; u64 tlba, tlen, sect_left; @@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) } /* - * Reset the HBA. - * - * Resets the HBA by setting the HBA Reset bit in the Global - * HBA Control register. After setting the HBA Reset bit the - * function waits for 1 second before reading the HBA Reset - * bit to make sure it has cleared. 
If HBA Reset is not clear - * an error is returned. Cannot be used in non-blockable - * context. - * - * @dd Pointer to the driver data structure. - * - * return value - * 0 The reset was successful. - * -1 The HBA Reset bit did not clear. - */ -static int mtip_hba_reset(struct driver_data *dd) -{ - mtip_deinit_port(dd->port); - - /* Set the reset bit */ - writel(HOST_RESET, dd->mmio + HOST_CTL); - - /* Flush */ - readl(dd->mmio + HOST_CTL); - - /* Wait for reset to clear */ - ssleep(1); - - /* Check the bit has cleared */ - if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { - dev_err(&dd->pdev->dev, - "Reset bit did not clear.\n"); - return -1; - } - - return 0; -} - -/* * Display the identify command data. * * @port Pointer to the port data structure. @@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev, static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); +/* debugsfs entries */ + +static ssize_t show_device_status(struct device_driver *drv, char *buf) +{ + int size = 0; + struct driver_data *dd, *tmp; + unsigned long flags; + char id_buf[42]; + u16 status = 0; + + spin_lock_irqsave(&dev_lock, flags); + size += sprintf(&buf[size], "Devices Present:\n"); + list_for_each_entry_safe(dd, tmp, &online_list, online_list) { + if (dd->pdev) { + if (dd->port && + dd->port->identify && + dd->port->identify_valid) { + strlcpy(id_buf, + (char *) (dd->port->identify + 10), 21); + status = *(dd->port->identify + 141); + } else { + memset(id_buf, 0, 42); + status = 0; + } + + if (dd->port && + test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { + size += sprintf(&buf[size], + " device %s %s (ftl rebuild %d %%)\n", + dev_name(&dd->pdev->dev), + id_buf, + status); + } else { + size += sprintf(&buf[size], + " device %s %s\n", + dev_name(&dd->pdev->dev), + id_buf); + } + } + } + + size += sprintf(&buf[size], "Devices Being Removed:\n"); + list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { + if (dd->pdev) { + if (dd->port && + dd->port->identify && + dd->port->identify_valid) { + strlcpy(id_buf, + (char *) (dd->port->identify+10), 21); + status = *(dd->port->identify + 141); + } else { + memset(id_buf, 0, 42); + status = 0; + } + + if (dd->port && + test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { + size += sprintf(&buf[size], + " device %s %s (ftl rebuild %d %%)\n", + dev_name(&dd->pdev->dev), + id_buf, + status); + } else { + size += sprintf(&buf[size], + " device %s %s\n", + dev_name(&dd->pdev->dev), + id_buf); + } + } + } + spin_unlock_irqrestore(&dev_lock, flags); + + return size; +} + +static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, + size_t len, loff_t *offset) +{ + int size = *offset; + char buf[MTIP_DFS_MAX_BUF_SIZE]; + + if (!len || *offset) + return 0; + + size += show_device_status(NULL, buf); + + *offset = size <= len ? 
size : len; + size = copy_to_user(ubuf, buf, *offset); + if (size) + return -EFAULT; + + return *offset; +} + static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { @@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, return *offset; } +static const struct file_operations mtip_device_status_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mtip_hw_read_device_status, + .llseek = no_llseek, +}; + static const struct file_operations mtip_regs_fops = { .owner = THIS_MODULE, .open = simple_open, @@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev, const struct cpumask *node_mask; int cpu, i = 0, j = 0; int my_node = NUMA_NO_NODE; + unsigned long flags; /* Allocate memory for this devices private data. */ my_node = pcibus_to_node(pdev->bus); @@ -4218,6 +4322,9 @@ static int mtip_pci_probe(struct pci_dev *pdev, dd->pdev = pdev; dd->numa_node = my_node; + INIT_LIST_HEAD(&dd->online_list); + INIT_LIST_HEAD(&dd->remove_list); + memset(dd->workq_name, 0, 32); snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); @@ -4305,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev, instance++; if (rv != MTIP_FTL_REBUILD_MAGIC) set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); + else + rv = 0; /* device in rebuild state, return 0 from probe */ + + /* Add to online list even if in ftl rebuild */ + spin_lock_irqsave(&dev_lock, flags); + list_add(&dd->online_list, &online_list); + spin_unlock_irqrestore(&dev_lock, flags); + goto done; block_initialize_err: @@ -4338,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev) { struct driver_data *dd = pci_get_drvdata(pdev); int counter = 0; + unsigned long flags; set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); + spin_lock_irqsave(&dev_lock, flags); + list_del_init(&dd->online_list); + list_add(&dd->remove_list, &removing_list); + spin_unlock_irqrestore(&dev_lock, flags); + if (mtip_check_surprise_removal(pdev)) { while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { counter++; @@ -4366,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev) pci_disable_msi(pdev); + spin_lock_irqsave(&dev_lock, flags); + list_del_init(&dd->remove_list); + spin_unlock_irqrestore(&dev_lock, flags); + kfree(dd); pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); } @@ -4513,6 +4638,11 @@ static int __init mtip_init(void) pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); + spin_lock_init(&dev_lock); + + INIT_LIST_HEAD(&online_list); + INIT_LIST_HEAD(&removing_list); + /* Allocate a major block device number to use with this driver. 
*/ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { @@ -4522,11 +4652,18 @@ static int __init mtip_init(void) } mtip_major = error; - if (!dfs_parent) { - dfs_parent = debugfs_create_dir("rssd", NULL); - if (IS_ERR_OR_NULL(dfs_parent)) { - pr_warn("Error creating debugfs parent\n"); - dfs_parent = NULL; + dfs_parent = debugfs_create_dir("rssd", NULL); + if (IS_ERR_OR_NULL(dfs_parent)) { + pr_warn("Error creating debugfs parent\n"); + dfs_parent = NULL; + } + if (dfs_parent) { + dfs_device_status = debugfs_create_file("device_status", + S_IRUGO, dfs_parent, NULL, + &mtip_device_status_fops); + if (IS_ERR_OR_NULL(dfs_device_status)) { + pr_err("Error creating device_status node\n"); + dfs_device_status = NULL; } } diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 3bffff5f670c..8e8334c9dd0f 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -129,9 +129,9 @@ enum { MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ - MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ - (1 << MTIP_PF_EH_ACTIVE_BIT) | \ - (1 << MTIP_PF_SE_ACTIVE_BIT) | \ + MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | + (1 << MTIP_PF_EH_ACTIVE_BIT) | + (1 << MTIP_PF_SE_ACTIVE_BIT) | (1 << MTIP_PF_DM_ACTIVE_BIT)), MTIP_PF_SVC_THD_ACTIVE_BIT = 4, @@ -144,9 +144,9 @@ enum { MTIP_DDF_REMOVE_PENDING_BIT = 1, MTIP_DDF_OVER_TEMP_BIT = 2, MTIP_DDF_WRITE_PROTECT_BIT = 3, - MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ - (1 << MTIP_DDF_SEC_LOCK_BIT) | \ - (1 << MTIP_DDF_OVER_TEMP_BIT) | \ + MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | + (1 << MTIP_DDF_SEC_LOCK_BIT) | + (1 << MTIP_DDF_OVER_TEMP_BIT) | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), MTIP_DDF_CLEANUP_BIT = 5, @@ -180,7 +180,7 @@ struct mtip_work { #define MTIP_TRIM_TIMEOUT_MS 240000 #define MTIP_MAX_TRIM_ENTRIES 8 -#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 +#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 struct mtip_trim_entry { u32 lba; /* starting lba of region */ @@ -501,6 +501,10 @@ struct driver_data { atomic_t irq_workers_active; int isr_binding; + + struct list_head online_list; /* linkage for online list */ + + struct list_head remove_list; /* linkage for removing list */ }; #endif diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f556f8a8b3f9..b7b7a88d9f68 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) struct rbd_device *rbd_dev = img_request->rbd_dev; struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; + struct rbd_obj_request *next_obj_request; dout("%s: img %p\n", __func__, img_request); - for_each_obj_request(img_request, obj_request) { + for_each_obj_request_safe(img_request, obj_request, next_obj_request) { int ret; obj_request->callback = rbd_img_obj_callback; diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index e3f9a99b8522..d784650d14f0 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) struct hpet_dev *devp; unsigned long addr; - if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) - return -EINVAL; - devp = file->private_data; addr = devp->hd_hpets->hp_hpet_phys; if (addr & (PAGE_SIZE - 1)) return -ENOSYS; - vma->vm_flags |= VM_IO; vma->vm_page_prot = 
pgprot_noncached(vma->vm_page_prot); - - if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot)) { - printk(KERN_ERR "%s: io_remap_pfn_range failed\n", - __func__); - return -EAGAIN; - } - - return 0; + return vm_iomap_memory(vma, addr, PAGE_SIZE); #else return -ENOSYS; #endif diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 69ae5972713c..a0f7724852eb 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng) } EXPORT_SYMBOL_GPL(hwrng_unregister); +static void __exit hwrng_exit(void) +{ + mutex_lock(&rng_mutex); + BUG_ON(current_rng); + kfree(rng_buffer); + mutex_unlock(&rng_mutex); +} + +module_exit(hwrng_exit); MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e905d5f53051..ce5f3fc25d6d 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -149,7 +149,8 @@ struct ports_device { spinlock_t ports_lock; /* To protect the vq operations for the control channel */ - spinlock_t cvq_lock; + spinlock_t c_ivq_lock; + spinlock_t c_ovq_lock; /* The current config space is stored here */ struct virtio_console_config config; @@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, vq = portdev->c_ovq; sg_init_one(sg, &cpkt, sizeof(cpkt)); + + spin_lock(&portdev->c_ovq_lock); if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { virtqueue_kick(vq); while (!virtqueue_get_buf(vq, &len)) cpu_relax(); } + spin_unlock(&portdev->c_ovq_lock); return 0; } @@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id) * rproc_serial does not want the console port, only * the generic port implementation. */ - port->host_connected = port->guest_connected = true; + port->host_connected = true; else if (!use_multiport(port->portdev)) { /* * If we're not using multiport support, @@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work) portdev = container_of(work, struct ports_device, control_work); vq = portdev->c_ivq; - spin_lock(&portdev->cvq_lock); + spin_lock(&portdev->c_ivq_lock); while ((buf = virtqueue_get_buf(vq, &len))) { - spin_unlock(&portdev->cvq_lock); + spin_unlock(&portdev->c_ivq_lock); buf->len = len; buf->offset = 0; handle_control_message(portdev, buf); - spin_lock(&portdev->cvq_lock); + spin_lock(&portdev->c_ivq_lock); if (add_inbuf(portdev->c_ivq, buf) < 0) { dev_warn(&portdev->vdev->dev, "Error adding buffer to queue\n"); free_buf(buf, false); } } - spin_unlock(&portdev->cvq_lock); + spin_unlock(&portdev->c_ivq_lock); } static void out_intr(struct virtqueue *vq) @@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq) port->inbuf = get_inbuf(port); /* - * Don't queue up data when port is closed. This condition + * Normally the port should not accept data when the port is + * closed. For generic serial ports, the host won't (shouldn't) + * send data till the guest is connected. But this condition * can be reached when a console port is not yet connected (no - * tty is spawned) and the host sends out data to console - * ports. For generic serial ports, the host won't - * (shouldn't) send data till the guest is connected. + * tty is spawned) and the other side sends out data over the + * vring, or when a remote devices start sending data before + * the ports are opened. 
+ * + * A generic serial port will discard data if not connected, + * while console ports and rproc-serial ports accepts data at + * any time. rproc-serial is initiated with guest_connected to + * false because port_fops_open expects this. Console ports are + * hooked up with an HVC console and is initialized with + * guest_connected to true. */ - if (!port->guest_connected) + + if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) discard_port_data(port); spin_unlock_irqrestore(&port->inbuf_lock, flags); @@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev) if (multiport) { unsigned int nr_added_bufs; - spin_lock_init(&portdev->cvq_lock); + spin_lock_init(&portdev->c_ivq_lock); + spin_lock_init(&portdev->c_ovq_lock); INIT_WORK(&portdev->control_work, &control_work_handler); - nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); + nr_added_bufs = fill_queue(portdev->c_ivq, + &portdev->c_ivq_lock); if (!nr_added_bufs) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); @@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev) return ret; if (use_multiport(portdev)) - fill_queue(portdev->c_ivq, &portdev->cvq_lock); + fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); list_for_each_entry(port, &portdev->ports, list) { port->in_vq = portdev->in_vqs[port->id]; diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 1e2de7305362..f873dcefe0de 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -703,7 +703,7 @@ static void tegra20_pll_init(void) clks[pll_a_out0] = clk; /* PLLE */ - clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, + clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base, 0, 100000000, &pll_e_params, 0, pll_e_freq_table, NULL); clk_register_clkdev(clk, "pll_e", NULL); diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb8927c..37d23a0f8c56 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { static int cpu0_cpufreq_probe(struct platform_device *pdev) { - struct device_node *np; + struct device_node *np, *parent; int ret; - for_each_child_of_node(of_find_node_by_path("/cpus"), np) { + parent = of_find_node_by_path("/cpus"); + if (!parent) { + pr_err("failed to find OF /cpus\n"); + return -ENOENT; + } + + for_each_child_of_node(parent, np) { if (of_get_property(np, "operating-points", NULL)) break; } diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01eee62..cc4bd2f6838a 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -14,8 +14,8 @@ * published by the Free Software Foundation. 
*/ -#ifndef _CPUFREQ_GOVERNER_H -#define _CPUFREQ_GOVERNER_H +#ifndef _CPUFREQ_GOVERNOR_H +#define _CPUFREQ_GOVERNOR_H #include <linux/cpufreq.h> #include <linux/kobject.h> @@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate); int cpufreq_governor_dbs(struct dbs_data *dbs_data, struct cpufreq_policy *policy, unsigned int event); -#endif /* _CPUFREQ_GOVERNER_H */ +#endif /* _CPUFREQ_GOVERNOR_H */ diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ad72922919ed..6133ef5cf671 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) sample_time = cpu->pstate_policy->sample_rate_ms; delay = msecs_to_jiffies(sample_time); - delay -= jiffies % delay; mod_timer_pinned(&cpu->timer, jiffies + delay); } diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 8bc5fef07e7a..22c9063e0120 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = { .shutdown = ux500_cryp_shutdown, .driver = { .owner = THIS_MODULE, - .name = "cryp1" + .name = "cryp1", .pm = &ux500_cryp_pm, } }; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 80b69971cf28..aeaea32bcfda 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -83,6 +83,7 @@ config INTEL_IOP_ADMA config DW_DMAC tristate "Synopsys DesignWare AHB DMA support" + depends on GENERIC_HARDIRQS select DMA_ENGINE default y if CPU_AT32AP7000 help diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6e13f262139a..88cfc61329d2 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan) dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); - BUG_ON(atc_chan_is_enabled(atchan)); - /* * Submit queued descriptors ASAP, i.e. before we go through * the completed ones. @@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan) { dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); + if (atc_chan_is_enabled(atchan)) + return; + if (list_empty(&atchan->active_list) || list_is_singular(&atchan->active_list)) { atc_complete_all(atchan); @@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan) return; spin_lock_irqsave(&atchan->lock, flags); - if (!atc_chan_is_enabled(atchan)) { - atc_advance_work(atchan); - } + atc_advance_work(atchan); spin_unlock_irqrestore(&atchan->lock, flags); } diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c4b4fd2acc42..08b43bf37158 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan) spin_lock_irqsave(&c->vc.lock, flags); if (vchan_issue_pending(&c->vc) && !c->desc) { - struct omap_dmadev *d = to_omap_dma_dev(chan->device); - spin_lock(&d->lock); - if (list_empty(&c->node)) - list_add_tail(&c->node, &d->pending); - spin_unlock(&d->lock); - tasklet_schedule(&d->task); + /* + * c->cyclic is used only by audio and in this case the DMA need + * to be started without delay. 
+ */ + if (!c->cyclic) { + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } else { + omap_dma_start_desc(c); + } } spin_unlock_irqrestore(&c->vc.lock, flags); } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 718153122759..5dbc5946c4c3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) { struct dma_pl330_platdata *pdat; struct dma_pl330_dmac *pdmac; - struct dma_pl330_chan *pch; + struct dma_pl330_chan *pch, *_p; struct pl330_info *pi; struct dma_device *pd; struct resource *res; @@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ret = dma_async_device_register(pd); if (ret) { dev_err(&adev->dev, "unable to register DMAC\n"); - goto probe_err2; + goto probe_err3; + } + + if (adev->dev.of_node) { + ret = of_dma_controller_register(adev->dev.of_node, + of_dma_pl330_xlate, pdmac); + if (ret) { + dev_err(&adev->dev, + "unable to register DMA to the generic DT DMA helpers\n"); + } } dev_info(&adev->dev, @@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, pi->pcfg.num_peri, pi->pcfg.num_events); - ret = of_dma_controller_register(adev->dev.of_node, - of_dma_pl330_xlate, pdmac); - if (ret) { - dev_err(&adev->dev, - "unable to register DMA to the generic DT DMA helpers\n"); - goto probe_err2; - } - return 0; +probe_err3: + amba_set_drvdata(adev, NULL); + /* Idle the DMAC */ + list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + chan.device_node) { + + /* Remove the channel */ + list_del(&pch->chan.device_node); + + /* Flush the channel */ + pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); + pl330_free_chan_resources(&pch->chan); + } probe_err2: pl330_del(pi); probe_err1: @@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev) if (!pdmac) return 0; - of_dma_controller_free(adev->dev.of_node); + if (adev->dev.of_node) + of_dma_controller_free(adev->dev.of_node); + dma_async_device_unregister(&pdmac->ddma); amba_set_drvdata(adev, NULL); /* Idle the DMAC */ diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index cdae207028a7..6c3fca97d346 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c @@ -19,10 +19,10 @@ /* There is only *one* pci_eisa device per machine, right ? */ static struct eisa_root_device pci_eisa_root; -static int __init pci_eisa_init(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __init pci_eisa_init(struct pci_dev *pdev) { - int rc; + int rc, i; + struct resource *res, *bus_res = NULL; if ((rc = pci_enable_device (pdev))) { printk (KERN_ERR "pci_eisa : Could not enable device %s\n", @@ -30,9 +30,30 @@ static int __init pci_eisa_init(struct pci_dev *pdev, return rc; } + /* + * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI + * device, so the resources available on EISA are the same as those + * available on the 82375 bus. This works the same as a PCI-PCI + * bridge in subtractive-decode mode (see pci_read_bridge_bases()). + * We assume other PCI-EISA bridges are similar. + * + * eisa_root_register() can only deal with a single io port resource, + * so we use the first valid io port resource. 
+ */ + pci_bus_for_each_resource(pdev->bus, res, i) + if (res && (res->flags & IORESOURCE_IO)) { + bus_res = res; + break; + } + + if (!bus_res) { + dev_err(&pdev->dev, "No resources available\n"); + return -1; + } + pci_eisa_root.dev = &pdev->dev; - pci_eisa_root.res = pdev->bus->resource[0]; - pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; + pci_eisa_root.res = bus_res; + pci_eisa_root.bus_base_addr = bus_res->start; pci_eisa_root.slots = EISA_MAX_SLOTS; pci_eisa_root.dma_mask = pdev->dma_mask; dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); @@ -45,22 +66,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev, return 0; } -static struct pci_device_id pci_eisa_pci_tbl[] = { - { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, - { 0, } -}; +/* + * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). + * Otherwise pnp resource will get enabled early and could prevent eisa + * to be initialized. + * Also need to make sure pci_eisa_init_early() is called after + * x86/pci_subsys_init(). + * So need to use subsys_initcall_sync with it. + */ +static int __init pci_eisa_init_early(void) +{ + struct pci_dev *dev = NULL; + int ret; -static struct pci_driver __refdata pci_eisa_driver = { - .name = "pci_eisa", - .id_table = pci_eisa_pci_tbl, - .probe = pci_eisa_init, -}; + for_each_pci_dev(dev) + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { + ret = pci_eisa_init(dev); + if (ret) + return ret; + } -static int __init pci_eisa_init_module (void) -{ - return pci_register_driver (&pci_eisa_driver); + return 0; } - -device_initcall(pci_eisa_init_module); -MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); +subsys_initcall_sync(pci_eisa_init_early); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 42c759a4d047..3e532002e4d1 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP config EFI_VARS tristate "EFI Variable Support via sysfs" depends on EFI + select UCS2_STRING default n help If you say Y here, you are able to get EFI (Extensible Firmware diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 7acafb80fd4c..f4baa11d3644 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -80,6 +80,7 @@ #include <linux/slab.h> #include <linux/pstore.h> #include <linux/ctype.h> +#include <linux/ucs2_string.h> #include <linux/fs.h> #include <linux/ramfs.h> @@ -172,51 +173,6 @@ static void efivar_update_sysfs_entries(struct work_struct *); static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries); static bool efivar_wq_enabled = true; -/* Return the number of unicode characters in data */ -static unsigned long -utf16_strnlen(efi_char16_t *s, size_t maxlength) -{ - unsigned long length = 0; - - while (*s++ != 0 && length < maxlength) - length++; - return length; -} - -static inline unsigned long -utf16_strlen(efi_char16_t *s) -{ - return utf16_strnlen(s, ~0UL); -} - -/* - * Return the number of bytes is the length of this string - * Note: this is NOT the same as the number of unicode characters - */ -static inline unsigned long -utf16_strsize(efi_char16_t *data, unsigned long maxlength) -{ - return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); -} - -static inline int -utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) -{ - while (1) { - if (len == 0) - return 0; - if (*a < *b) - return -1; - if (*a > *b) - return 1; - if (*a == 0) /* implies *b == 0 */ - return 
0; - a++; - b++; - len--; - } -} - static bool validate_device_path(struct efi_variable *var, int match, u8 *buffer, unsigned long len) @@ -268,7 +224,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, u16 filepathlength; int i, desclength = 0, namelen; - namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName)); + namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); /* Either "Boot" or "Driver" followed by four digits of hex */ for (i = match; i < match+4; i++) { @@ -291,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, * There's no stored length for the description, so it has to be * found by hand */ - desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; + desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; /* Each boot entry must have a descriptor */ if (!desclength) @@ -436,24 +392,12 @@ static efi_status_t check_var_size_locked(struct efivars *efivars, u32 attributes, unsigned long size) { - u64 storage_size, remaining_size, max_size; - efi_status_t status; const struct efivar_operations *fops = efivars->ops; - if (!efivars->ops->query_variable_info) + if (!efivars->ops->query_variable_store) return EFI_UNSUPPORTED; - status = fops->query_variable_info(attributes, &storage_size, - &remaining_size, &max_size); - - if (status != EFI_SUCCESS) - return status; - - if (!storage_size || size > remaining_size || size > max_size || - (remaining_size - size) < (storage_size / 2)) - return EFI_OUT_OF_RESOURCES; - - return status; + return fops->query_variable_store(attributes, size); } @@ -593,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) spin_lock_irq(&efivars->lock); status = check_var_size_locked(efivars, new_var->Attributes, - new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); + new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024)); if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) status = efivars->ops->set_variable(new_var->VariableName, @@ -771,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file, * QueryVariableInfo() isn't supported by the firmware. 
*/ - varsize = datasize + utf16_strsize(var->var.VariableName, 1024); + varsize = datasize + ucs2_strsize(var->var.VariableName, 1024); status = check_var_size(efivars, attributes, varsize); if (status != EFI_SUCCESS) { @@ -1223,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) inode = NULL; - len = utf16_strlen(entry->var.VariableName); + len = ucs2_strlen(entry->var.VariableName); /* name, plus '-', plus GUID, plus NUL*/ name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC); @@ -1481,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, if (efi_guidcmp(entry->var.VendorGuid, vendor)) continue; - if (utf16_strncmp(entry->var.VariableName, efi_name, - utf16_strlen(efi_name))) { + if (ucs2_strncmp(entry->var.VariableName, efi_name, + ucs2_strlen(efi_name))) { /* * Check if an old format, * which doesn't support holding @@ -1494,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, for (i = 0; i < DUMP_NAME_LEN; i++) efi_name_old[i] = name_old[i]; - if (utf16_strncmp(entry->var.VariableName, efi_name_old, - utf16_strlen(efi_name_old))) + if (ucs2_strncmp(entry->var.VariableName, efi_name_old, + ucs2_strlen(efi_name_old))) continue; } @@ -1573,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, * Does this variable already exist? */ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf16_strsize(new_var->VariableName, 1024); + strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024); + strsize2 = ucs2_strsize(new_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), new_var->VariableName, strsize1) && @@ -1590,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, } status = check_var_size_locked(efivars, new_var->Attributes, - new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); + new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024)); if (status && status != EFI_UNSUPPORTED) { spin_unlock_irq(&efivars->lock); @@ -1614,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, /* Create the entry in sysfs. Locking is not required here */ status = efivar_create_sysfs_entry(efivars, - utf16_strsize(new_var->VariableName, + ucs2_strsize(new_var->VariableName, 1024), new_var->VariableName, &new_var->VendorGuid); @@ -1644,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, * Does this variable already exist? 
*/ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf16_strsize(del_var->VariableName, 1024); + strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024); + strsize2 = ucs2_strsize(del_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), del_var->VariableName, strsize1) && @@ -1684,16 +1628,17 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, return count; } -static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor) +static bool variable_is_present(struct efivars *efivars, + efi_char16_t *variable_name, + efi_guid_t *vendor) { struct efivar_entry *entry, *n; - struct efivars *efivars = &__efivars; unsigned long strsize1, strsize2; bool found = false; - strsize1 = utf16_strsize(variable_name, 1024); + strsize1 = ucs2_strsize(variable_name, 1024); list_for_each_entry_safe(entry, n, &efivars->list, list) { - strsize2 = utf16_strsize(entry->var.VariableName, 1024); + strsize2 = ucs2_strsize(entry->var.VariableName, 1024); if (strsize1 == strsize2 && !memcmp(variable_name, &(entry->var.VariableName), strsize2) && @@ -1759,8 +1704,8 @@ static void efivar_update_sysfs_entries(struct work_struct *work) if (status != EFI_SUCCESS) { break; } else { - if (!variable_is_present(variable_name, - &vendor)) { + if (!variable_is_present(efivars, + variable_name, &vendor)) { found = true; break; } @@ -2064,7 +2009,8 @@ int register_efivars(struct efivars *efivars, * we'll ever see a different variable name, * and may end up looping here forever. */ - if (variable_is_present(variable_name, &vendor_guid)) { + if (variable_is_present(efivars, variable_name, + &vendor_guid)) { dup_variable_bug(variable_name, &vendor_guid, variable_name_size); status = EFI_NOT_FOUND; @@ -2131,7 +2077,7 @@ efivars_init(void) ops.get_variable = efi.get_variable; ops.set_variable = efi.set_variable; ops.get_next_variable = efi.get_next_variable; - ops.query_variable_info = efi.query_variable_info; + ops.query_variable_store = efi_query_variable_store; error = register_efivars(&__efivars, &ops, efi_kobj); if (error) diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index f9dbd503fc40..de3c317bd3e2 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c @@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr) * If it can't be trusted, assume that the pin can be used as a GPIO. */ if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) - return 1; + return 0; return ichx_read_bit(GPIO_USE_SEL, nr) ? 
0 : -ENODEV; } diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 24059462c87f..9391cf16e990 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, chip->gpio_chip.ngpio, irq_base, &pca953x_irq_simple_ops, - NULL); + chip); if (!chip->domain) return -ENODEV; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 9cc108d2b770..8325f580c0f1 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -642,7 +642,12 @@ static struct platform_driver pxa_gpio_driver = { .of_match_table = of_match_ptr(pxa_gpio_dt_ids), }, }; -module_platform_driver(pxa_gpio_driver); + +static int __init pxa_gpio_init(void) +{ + return platform_driver_register(&pxa_gpio_driver); +} +postcore_initcall(pxa_gpio_init); #ifdef CONFIG_PM static int pxa_gpio_suspend(void) diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 770476a9da87..3ce5bc38ac31 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { .xlate = irq_domain_xlate_twocell, }; -static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) +static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, + struct device_node *np) { - int base = stmpe_gpio->irq_base; + int base = 0; - stmpe_gpio->domain = irq_domain_add_simple(NULL, + if (!np) + base = stmpe_gpio->irq_base; + + stmpe_gpio->domain = irq_domain_add_simple(np, stmpe_gpio->chip.ngpio, base, &stmpe_gpio_irq_simple_ops, stmpe_gpio); if (!stmpe_gpio->domain) { @@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev) stmpe_gpio->chip = template_chip; stmpe_gpio->chip.ngpio = stmpe->num_gpios; stmpe_gpio->chip.dev = &pdev->dev; +#ifdef CONFIG_OF + stmpe_gpio->chip.of_node = np; +#endif stmpe_gpio->chip.base = pdata ? 
pdata->gpio_base : -1; if (pdata) @@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) goto out_free; if (irq >= 0) { - ret = stmpe_gpio_irq_init(stmpe_gpio); + ret = stmpe_gpio_irq_init(stmpe_gpio, np); if (ret) goto out_disable; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 792c3e3795ca..dd64a06dc5b4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev, fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); - drm_modeset_unlock_all(dev); return PTR_ERR(fb); } @@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev, fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); - drm_modeset_unlock_all(dev); return PTR_ERR(fb); } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 59d6b9bf204b..892ff9f95975 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!fb_helper->fb) return 0; - drm_modeset_lock_all(dev); + mutex_lock(&fb_helper->dev->mode_config.mutex); if (!drm_fb_helper_is_bound(fb_helper)) { fb_helper->delayed_hotplug = true; - drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->dev->mode_config.mutex); return 0; } DRM_DEBUG_KMS("\n"); @@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); + mutex_unlock(&fb_helper->dev->mode_config.mutex); + + drm_modeset_lock_all(dev); drm_setup_crtcs(fb_helper); drm_modeset_unlock_all(dev); - drm_fb_helper_set_par(fb_helper->fbdev); return 0; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 13fdcd10a605..429e07d0b0f1 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp) int retcode = 0; int need_setup = 0; struct address_space *old_mapping; + struct address_space *old_imapping; minor = idr_find(&drm_minors_idr, minor_id); if (!minor) @@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp) if (!dev->open_count++) need_setup = 1; mutex_lock(&dev->struct_mutex); + old_imapping = inode->i_mapping; old_mapping = dev->dev_mapping; if (old_mapping == NULL) dev->dev_mapping = &inode->i_data; @@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp) err_undo: mutex_lock(&dev->struct_mutex); - filp->f_mapping = old_mapping; - inode->i_mapping = old_mapping; + filp->f_mapping = old_imapping; + inode->i_mapping = old_imapping; iput(container_of(dev->dev_mapping, struct inode, i_data)); dev->dev_mapping = old_mapping; mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3b11ab0fbc96..9a48e1a2d417 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) if (eb == NULL) { int size = args->buffer_count; int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; - BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); + BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); while (count > 2*size) count >>= 1; eb = kzalloc(count*sizeof(struct 
hlist_head) + diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 32a3693905ec..1ce45a0a2d3e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -45,6 +45,9 @@ struct intel_crt { struct intel_encoder base; + /* DPMS state is stored in the connector, which we need in the + * encoder's enable/disable callbacks */ + struct intel_connector *connector; bool force_hotplug_required; u32 adpa_reg; }; @@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, return true; } -static void intel_disable_crt(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); - u32 temp; - - temp = I915_READ(crt->adpa_reg); - temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; - temp &= ~ADPA_DAC_ENABLE; - I915_WRITE(crt->adpa_reg, temp); -} - -static void intel_enable_crt(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); - u32 temp; - - temp = I915_READ(crt->adpa_reg); - temp |= ADPA_DAC_ENABLE; - I915_WRITE(crt->adpa_reg, temp); -} - /* Note: The caller is required to filter out dpms modes not supported by the * platform. */ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) @@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) I915_WRITE(crt->adpa_reg, temp); } +static void intel_disable_crt(struct intel_encoder *encoder) +{ + intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void intel_enable_crt(struct intel_encoder *encoder) +{ + struct intel_crt *crt = intel_encoder_to_crt(encoder); + + intel_crt_set_dpms(encoder, crt->connector->base.dpms); +} + + static void intel_crt_dpms(struct drm_connector *connector, int mode) { struct drm_device *dev = connector->dev; @@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev) } connector = &intel_connector->base; + crt->connector = intel_connector; drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d7d4afe01341..8fc93f90a7cd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2559,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dp_to_dev(intel_dp); i2c_del_adapter(&intel_dp->adapter); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); + mutex_lock(&dev->mode_config.mutex); ironlake_panel_vdd_off_sync(intel_dp); + mutex_unlock(&dev->mode_config.mutex); } kfree(intel_dig_port); } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fe22bb780e1d..78d8e919509f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, int i; unsigned char misc = 0; unsigned char ext_vga[6]; - unsigned char ext_vga_index24; - unsigned char dac_index90 = 0; u8 bppshift; static unsigned char dacvalue[] = { @@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, option2 = 0x0000b000; break; case G200_ER: - dac_index90 = 0; break; } @@ 
-852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, WREG_DAC(i, dacvalue[i]); } - if (mdev->type == G200_ER) { - WREG_DAC(0x90, dac_index90); - } - + if (mdev->type == G200_ER) + WREG_DAC(0x90, 0); if (option) pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); @@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (mdev->type == G200_WB) ext_vga[1] |= 0x88; - ext_vga_index24 = 0x05; - /* Set pixel clocks */ misc = 0x2d; WREG8(MGA_MISC_OUT, misc); @@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, } if (mdev->type == G200_ER) - WREG_ECRT(24, ext_vga_index24); + WREG_ECRT(0x24, 0x5); if (mdev->type == G200_EV) { WREG_ECRT(6, 0); diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index e816f06637a7..0e2c1a4f1659 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) } } +static void +nouveau_bios_shadow_platform(struct nouveau_bios *bios) +{ + struct pci_dev *pdev = nv_device(bios)->pdev; + size_t size; + + void __iomem *rom = pci_platform_rom(pdev, &size); + if (rom && size) { + bios->data = kmalloc(size, GFP_KERNEL); + if (bios->data) { + memcpy_fromio(bios->data, rom, size); + bios->size = size; + } + } +} + static int nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) { @@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios) { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, + { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, {} }; struct methods *mthd, *best; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 3b6dc883e150..5eb3e0da7c6e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan, *temp; + struct nouveau_abi16_chan *chan = NULL, *temp; struct nouveau_abi16_ntfy *ntfy; struct nouveau_object *object; struct nv_dma_class args = {}; @@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) return nouveau_abi16_put(abi16, -EINVAL); - list_for_each_entry_safe(chan, temp, &abi16->channels, head) { - if (chan->chan->handle == (NVDRM_CHAN | info->channel)) + list_for_each_entry(temp, &abi16->channels, head) { + if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { + chan = temp; break; - chan = NULL; + } } if (!chan) @@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) { struct drm_nouveau_gpuobj_free *fini = data; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan, *temp; + struct nouveau_abi16_chan *chan = NULL, *temp; struct nouveau_abi16_ntfy *ntfy; int ret; if (unlikely(!abi16)) return -ENOMEM; - list_for_each_entry_safe(chan, temp, &abi16->channels, head) { - if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) + list_for_each_entry(temp, &abi16->channels, head) { + if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { + chan = temp; break; - chan = 
NULL; + } } if (!chan) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d1099365bfc1..c95decf543e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400); static struct drm_driver driver; static int +nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) +{ + struct nouveau_drm *drm = + container_of(event, struct nouveau_drm, vblank[head]); + drm_handle_vblank(drm->dev, head); + return NVKM_EVENT_KEEP; +} + +static int nouveau_drm_vblank_enable(struct drm_device *dev, int head) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_disp *pdisp = nouveau_disp(drm->device); - nouveau_event_get(pdisp->vblank, head, &drm->vblank); + + if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) + return -EIO; + WARN_ON_ONCE(drm->vblank[head].func); + drm->vblank[head].func = nouveau_drm_vblank_handler; + nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]); return 0; } @@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_disp *pdisp = nouveau_disp(drm->device); - nouveau_event_put(pdisp->vblank, head, &drm->vblank); -} - -static int -nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) -{ - struct nouveau_drm *drm = - container_of(event, struct nouveau_drm, vblank); - drm_handle_vblank(drm->dev, head); - return NVKM_EVENT_KEEP; + if (drm->vblank[head].func) + nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); + else + WARN_ON_ONCE(1); + drm->vblank[head].func = NULL; } static u64 @@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) dev->dev_private = drm; drm->dev = dev; - drm->vblank.func = nouveau_drm_vblank_handler; INIT_LIST_HEAD(&drm->clients); spin_lock_init(&drm->tile.lock); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index b25df374c901..9c39bafbef2c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -113,7 +113,7 @@ struct nouveau_drm { struct nvbios vbios; struct nouveau_display *display; struct backlight_device *backlight; - struct nouveau_eventh vblank; + struct nouveau_eventh vblank[4]; /* power management */ struct nouveau_pm *pm; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7f0e6c3f37d1..1ddc03e51bf4 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data) { struct nv50_display_flip *flip = data; if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == - flip->chan->data); + flip->chan->data) return true; usleep_range(1, 2); return false; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b8015913d382..fa3c56fba294 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev) return true; } +static bool radeon_read_platform_bios(struct radeon_device *rdev) +{ + uint8_t __iomem *bios; + size_t size; + + rdev->bios = NULL; + + bios = pci_platform_rom(rdev->pdev, &size); + if (!bios) { + return false; + } + + if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + return false; + } + rdev->bios = kmemdup(bios, size, GFP_KERNEL); + if (rdev->bios == NULL) { + return false; + } + 
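	/* At this point rdev->bios holds a private copy of the platform-
	 * provided image; the 0x55 0xaa bytes tested above are the standard
	 * PCI expansion ROM signature, so images lacking it were rejected
	 * before the copy was made. */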
+ return true; +} + #ifdef CONFIG_ACPI /* ATRM is used to get the BIOS on the discrete cards in * dual-gpu systems. @@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev) if (r == false) { r = radeon_read_disabled_bios(rdev); } + if (r == false) { + r = radeon_read_platform_bios(rdev); + } if (r == false || rdev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); rdev->bios = NULL; diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index fe5cdbcf2636..b44d548c56f8 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector) int ret; edid = (struct edid *)udl_get_edid(udl); + if (!edid) { + drm_mode_connector_update_edid_property(connector, NULL); + return 0; + } /* * We only read the main block, but if the monitor reports extension diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 512b01c04ea7..aa341d135867 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, @@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev) hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) return true; break; + case USB_VENDOR_ID_ATMEL_V_USB: + /* Masterkit MA901 usb radio based on Atmel tiny85 chip and + * it has the same USB ID as many Atmel V-USB devices. This + * usb radio is handled by radio-ma901.c driver so we want + * ignore the hid. Check the name, bus, product and ignore + * if we have MA901 usb radio. 
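	 * (The 0x16c0:0x05df pair is shared by many V-USB based gadgets, so
	 * the vendor/product ids alone cannot identify the radio; the
	 * product-name match below is the real discriminator.)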
+ */ + if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB && + hdev->bus == BUS_USB && + strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) + return true; + break; } if (hdev->type == HID_TYPE_USBMOUSE && diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c4388776f4e4..5309fd5eb0eb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -158,6 +158,8 @@ #define USB_VENDOR_ID_ATMEL 0x03eb #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 +#define USB_VENDOR_ID_ATMEL_V_USB 0x16c0 +#define USB_DEVICE_ID_ATMEL_V_USB 0x05df #define USB_VENDOR_ID_AUREAL 0x0755 #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 @@ -557,9 +559,6 @@ #define USB_VENDOR_ID_MADCATZ 0x0738 #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 -#define USB_VENDOR_ID_MASTERKIT 0x16c0 -#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df - #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index f7f113ba083e..a8ce44296cfd 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev, return 0; } +static void magicmouse_input_configured(struct hid_device *hdev, + struct hid_input *hi) + +{ + struct magicmouse_sc *msc = hid_get_drvdata(hdev); + + int ret = magicmouse_setup_input(msc->input, hdev); + if (ret) { + hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); + /* clean msc->input to notify probe() of the failure */ + msc->input = NULL; + } +} + + static int magicmouse_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev, goto err_free; } - /* We do this after hid-input is done parsing reports so that - * hid-input uses the most natural button and axis IDs. 
- */ - if (msc->input) { - ret = magicmouse_setup_input(msc->input, hdev); - if (ret) { - hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); - goto err_stop_hw; - } + if (!msc->input) { + hid_err(hdev, "magicmouse input not registered\n"); + ret = -ENOMEM; + goto err_stop_hw; } if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) @@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = { .remove = magicmouse_remove, .raw_event = magicmouse_raw_event, .input_mapping = magicmouse_input_mapping, + .input_configured = magicmouse_input_configured, }; module_hid_driver(magicmouse_driver); diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index db713c0dfba4..461a0d739d75 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "%s: can't power on device\n", __func__); + pm_runtime_put_noidle(dev); + module_put(dev->driver->owner); return ret; } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0ceb6e1b0f65..e3085c487ace 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev) adap->algo = &i2c_dw_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; - ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev)); r = i2c_add_numbered_adapter(adap); if (r) { diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 5d6675013864..1a38dd7dfe4e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x3c, idle_cpu_hsw), ICPU(0x3f, idle_cpu_hsw), ICPU(0x45, idle_cpu_hsw), + ICPU(0x46, idle_cpu_hsw), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 08a6c6d39e56..911205d3d5a0 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -44,7 +44,7 @@ #include "qib.h" #include "qib_7220.h" -#define SD7220_FW_NAME "intel/sd7220.fw" +#define SD7220_FW_NAME "qlogic/sd7220.fw" MODULE_FIRMWARE(SD7220_FW_NAME); /* diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 1daa97913b7d..0bfd8cf25200 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x802: /* Intuos4 General Pen */ case 0x804: /* Intuos4 Marker Pen */ case 0x40802: /* Intuos4 Classic Pen */ - case 0x18803: /* DTH2242 Grip Pen */ + case 0x18802: /* DTH2242 Grip Pen */ case 0x022: wacom->tool[idx] = BTN_TOOL_PEN; break; @@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB = { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBC = - { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047, + { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047, 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x26 = { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, @@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = { { 
USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, { USB_DEVICE_WACOM(0x59) }, - { USB_DEVICE_WACOM(0x5D) }, + { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, @@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, { USB_DEVICE_WACOM(0xF8) }, - { USB_DEVICE_WACOM(0xF6) }, + { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xFA) }, { USB_DEVICE_LENOVO(0x6004) }, { } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index a7f6b04eaa5e..1d84be17ff21 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -46,6 +46,7 @@ #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" +#include "pci.h" #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) @@ -263,12 +264,6 @@ static bool check_device(struct device *dev) return true; } -static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) -{ - pci_dev_put(*from); - *from = to; -} - static struct pci_bus *find_hosted_bus(struct pci_bus *bus) { while (!bus->self) { @@ -701,9 +696,6 @@ retry: static void iommu_poll_events(struct amd_iommu *iommu) { u32 head, tail; - unsigned long flags; - - spin_lock_irqsave(&iommu->lock, flags); head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); @@ -714,8 +706,6 @@ static void iommu_poll_events(struct amd_iommu *iommu) } writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - - spin_unlock_irqrestore(&iommu->lock, flags); } static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) @@ -740,17 +730,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) static void iommu_poll_ppr_log(struct amd_iommu *iommu) { - unsigned long flags; u32 head, tail; if (iommu->ppr_log == NULL) return; - /* enable ppr interrupts again */ - writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); - - spin_lock_irqsave(&iommu->lock, flags); - head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); @@ -786,34 +770,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - /* - * Release iommu->lock because ppr-handling might need to - * re-acquire it - */ - spin_unlock_irqrestore(&iommu->lock, flags); - /* Handle PPR entry */ iommu_handle_ppr_entry(iommu, entry); - spin_lock_irqsave(&iommu->lock, flags); - /* Refresh ring-buffer information */ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); } - - spin_unlock_irqrestore(&iommu->lock, flags); } irqreturn_t amd_iommu_int_thread(int irq, void *data) { - struct amd_iommu *iommu; + struct amd_iommu *iommu = (struct amd_iommu *) data; + u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); - for_each_iommu(iommu) { - iommu_poll_events(iommu); - iommu_poll_ppr_log(iommu); - } + while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) { + /* Enable EVT and PPR interrupts again */ + writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK), + iommu->mmio_base + MMIO_STATUS_OFFSET); + if (status & MMIO_STATUS_EVT_INT_MASK) { + pr_devel("AMD-Vi: Processing IOMMU Event Log\n"); + iommu_poll_events(iommu); + } + + if (status & MMIO_STATUS_PPR_INT_MASK) { + pr_devel("AMD-Vi: Processing IOMMU PPR Log\n"); + 
iommu_poll_ppr_log(iommu); + } + + /* + * Hardware bug: ERBT1312 + * When re-enabling interrupt (by writing 1 + * to clear the bit), the hardware might also try to set + * the interrupt bit in the event status register. + * In this scenario, the bit will be set, and disable + * subsequent interrupts. + * + * Workaround: The IOMMU driver should read back the + * status register and check if the interrupt bits are cleared. + * If not, driver will need to go through the interrupt handler + * again and re-clear the bits + */ + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + } return IRQ_HANDLED; } @@ -2839,24 +2839,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, } /* - * This is a special map_sg function which is used if we should map a - * device which is not handled by an AMD IOMMU in the system. - */ -static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, - int nelems, int dir) -{ - struct scatterlist *s; - int i; - - for_each_sg(sglist, s, nelems, i) { - s->dma_address = (dma_addr_t)sg_phys(s); - s->dma_length = s->length; - } - - return nelems; -} - -/* * The exported map_sg function for dma_ops (handles scatter-gather * lists). */ @@ -2875,9 +2857,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, INC_STATS_COUNTER(cnt_map_sg); domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) - return map_sg_no_iommu(dev, sglist, nelems, dir); - else if (IS_ERR(domain)) + if (IS_ERR(domain)) return 0; dma_mask = *dev->dma_mask; @@ -3947,6 +3927,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) if (!table) goto out; + /* Initialize table spin-lock */ + spin_lock_init(&table->lock); + if (ioapic) /* Keep the first 32 indexes free for IOAPIC interrupts */ table->min_index = 32; @@ -4007,7 +3990,7 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count) c = 0; if (c == count) { - struct irq_2_iommu *irte_info; + struct irq_2_irte *irte_info; for (; c != 0; --c) table->table[index - c + 1] = IRTE_ALLOCATED; @@ -4015,9 +3998,9 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count) index -= count - 1; cfg->remapped = 1; - irte_info = &cfg->irq_2_iommu; - irte_info->sub_handle = devid; - irte_info->irte_index = index; + irte_info = &cfg->irq_2_irte; + irte_info->devid = devid; + irte_info->index = index; goto out; } @@ -4098,7 +4081,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, struct io_apic_irq_attr *attr) { struct irq_remap_table *table; - struct irq_2_iommu *irte_info; + struct irq_2_irte *irte_info; struct irq_cfg *cfg; union irte irte; int ioapic_id; @@ -4110,7 +4093,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, if (!cfg) return -EINVAL; - irte_info = &cfg->irq_2_iommu; + irte_info = &cfg->irq_2_irte; ioapic_id = mpc_ioapic_id(attr->ioapic); devid = get_ioapic_devid(ioapic_id); @@ -4125,8 +4108,8 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, /* Setup IRQ remapping info */ cfg->remapped = 1; - irte_info->sub_handle = devid; - irte_info->irte_index = index; + irte_info->devid = devid; + irte_info->index = index; /* Setup IRTE for IOMMU */ irte.val = 0; @@ -4160,7 +4143,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, static int set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { - struct irq_2_iommu *irte_info; + struct irq_2_irte *irte_info; unsigned int dest, irq; struct irq_cfg *cfg; union irte irte; @@ 
-4171,12 +4154,12 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask, cfg = data->chip_data; irq = data->irq; - irte_info = &cfg->irq_2_iommu; + irte_info = &cfg->irq_2_irte; if (!cpumask_intersects(mask, cpu_online_mask)) return -EINVAL; - if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte)) + if (get_irte(irte_info->devid, irte_info->index, &irte)) return -EBUSY; if (assign_irq_vector(irq, cfg, mask)) @@ -4192,7 +4175,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask, irte.fields.vector = cfg->vector; irte.fields.destination = dest; - modify_irte(irte_info->sub_handle, irte_info->irte_index, irte); + modify_irte(irte_info->devid, irte_info->index, irte); if (cfg->move_in_progress) send_cleanup_vector(cfg); @@ -4204,16 +4187,16 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask, static int free_irq(int irq) { - struct irq_2_iommu *irte_info; + struct irq_2_irte *irte_info; struct irq_cfg *cfg; cfg = irq_get_chip_data(irq); if (!cfg) return -EINVAL; - irte_info = &cfg->irq_2_iommu; + irte_info = &cfg->irq_2_irte; - free_irte(irte_info->sub_handle, irte_info->irte_index); + free_irte(irte_info->devid, irte_info->index); return 0; } @@ -4222,7 +4205,7 @@ static void compose_msi_msg(struct pci_dev *pdev, unsigned int irq, unsigned int dest, struct msi_msg *msg, u8 hpet_id) { - struct irq_2_iommu *irte_info; + struct irq_2_irte *irte_info; struct irq_cfg *cfg; union irte irte; @@ -4230,7 +4213,7 @@ static void compose_msi_msg(struct pci_dev *pdev, if (!cfg) return; - irte_info = &cfg->irq_2_iommu; + irte_info = &cfg->irq_2_irte; irte.val = 0; irte.fields.vector = cfg->vector; @@ -4239,11 +4222,11 @@ static void compose_msi_msg(struct pci_dev *pdev, irte.fields.dm = apic->irq_dest_mode; irte.fields.valid = 1; - modify_irte(irte_info->sub_handle, irte_info->irte_index, irte); + modify_irte(irte_info->devid, irte_info->index, irte); msg->address_hi = MSI_ADDR_BASE_HI; msg->address_lo = MSI_ADDR_BASE_LO; - msg->data = irte_info->irte_index; + msg->data = irte_info->index; } static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec) @@ -4268,7 +4251,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec) static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq, int index, int offset) { - struct irq_2_iommu *irte_info; + struct irq_2_irte *irte_info; struct irq_cfg *cfg; u16 devid; @@ -4283,18 +4266,18 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq, return 0; devid = get_device_id(&pdev->dev); - irte_info = &cfg->irq_2_iommu; + irte_info = &cfg->irq_2_irte; cfg->remapped = 1; - irte_info->sub_handle = devid; - irte_info->irte_index = index + offset; + irte_info->devid = devid; + irte_info->index = index + offset; return 0; } static int setup_hpet_msi(unsigned int irq, unsigned int id) { - struct irq_2_iommu *irte_info; + struct irq_2_irte *irte_info; struct irq_cfg *cfg; int index, devid; @@ -4302,7 +4285,7 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id) if (!cfg) return -EINVAL; - irte_info = &cfg->irq_2_iommu; + irte_info = &cfg->irq_2_irte; devid = get_hpet_devid(id); if (devid < 0) return devid; @@ -4312,8 +4295,8 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id) return index; cfg->remapped = 1; - irte_info->sub_handle = devid; - irte_info->irte_index = index; + irte_info->devid = devid; + irte_info->index = index; return 0; } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 
e3c2d74b7684..9d23552a9619 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -213,6 +213,14 @@ enum iommu_init_state { IOMMU_INIT_ERROR, }; +/* Early ioapic and hpet maps from kernel command line */ +#define EARLY_MAP_SIZE 4 +static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; +static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; +static int __initdata early_ioapic_map_size; +static int __initdata early_hpet_map_size; +static bool __initdata cmdline_maps; + static enum iommu_init_state init_state = IOMMU_START_STATE; static int amd_iommu_enable_interrupts(void); @@ -703,31 +711,66 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, set_iommu_for_device(iommu, devid); } -static int add_special_device(u8 type, u8 id, u16 devid) +static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) { struct devid_map *entry; struct list_head *list; - if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET) + if (type == IVHD_SPECIAL_IOAPIC) + list = &ioapic_map; + else if (type == IVHD_SPECIAL_HPET) + list = &hpet_map; + else return -EINVAL; + list_for_each_entry(entry, list, list) { + if (!(entry->id == id && entry->cmd_line)) + continue; + + pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n", + type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); + + return 0; + } + entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; - entry->id = id; - entry->devid = devid; - - if (type == IVHD_SPECIAL_IOAPIC) - list = &ioapic_map; - else - list = &hpet_map; + entry->id = id; + entry->devid = devid; + entry->cmd_line = cmd_line; list_add_tail(&entry->list, list); return 0; } +static int __init add_early_maps(void) +{ + int i, ret; + + for (i = 0; i < early_ioapic_map_size; ++i) { + ret = add_special_device(IVHD_SPECIAL_IOAPIC, + early_ioapic_map[i].id, + early_ioapic_map[i].devid, + early_ioapic_map[i].cmd_line); + if (ret) + return ret; + } + + for (i = 0; i < early_hpet_map_size; ++i) { + ret = add_special_device(IVHD_SPECIAL_HPET, + early_hpet_map[i].id, + early_hpet_map[i].devid, + early_hpet_map[i].cmd_line); + if (ret) + return ret; + } + + return 0; +} + /* * Reads the device exclusion range from ACPI and initializes the IOMMU with * it @@ -764,6 +807,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, u32 dev_i, ext_flags = 0; bool alias = false; struct ivhd_entry *e; + int ret; + + + ret = add_early_maps(); + if (ret) + return ret; /* * First save the recommended feature enable bits from ACPI @@ -929,7 +978,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, PCI_FUNC(devid)); set_dev_entry_from_acpi(iommu, devid, e->flags, 0); - ret = add_special_device(type, handle, devid); + ret = add_special_device(type, handle, devid, false); if (ret) return ret; break; @@ -1275,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu) amd_iommu_int_handler, amd_iommu_int_thread, 0, "AMD-Vi", - iommu->dev); + iommu); if (r) { pci_disable_msi(iommu->dev); @@ -1638,18 +1687,28 @@ static void __init free_on_init_error(void) static bool __init check_ioapic_information(void) { + const char *fw_bug = FW_BUG; bool ret, has_sb_ioapic; int idx; has_sb_ioapic = false; ret = false; + /* + * If we have map overrides on the kernel command line the + * messages in this function might not describe firmware bugs + * anymore - so be careful + */ + if (cmdline_maps) + fw_bug = ""; + for (idx = 0; idx < nr_ioapics; idx++) { int devid, id = 
mpc_ioapic_id(idx); devid = get_ioapic_devid(id); if (devid < 0) { - pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id); + pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n", + fw_bug, id); ret = false; } else if (devid == IOAPIC_SB_DEVID) { has_sb_ioapic = true; @@ -1666,11 +1725,11 @@ static bool __init check_ioapic_information(void) * when the BIOS is buggy and provides us the wrong * device id for the IOAPIC in the system. */ - pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n"); + pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug); } if (!ret) - pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n"); + pr_err("AMD-Vi: Disabling interrupt remapping\n"); return ret; } @@ -1801,6 +1860,7 @@ static int __init early_amd_iommu_init(void) * Interrupt remapping enabled, create kmem_cache for the * remapping tables. */ + ret = -ENOMEM; amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", MAX_IRQS_PER_TABLE * sizeof(u32), IRQ_TABLE_ALIGNMENT, @@ -2097,8 +2157,70 @@ static int __init parse_amd_iommu_options(char *str) return 1; } -__setup("amd_iommu_dump", parse_amd_iommu_dump); -__setup("amd_iommu=", parse_amd_iommu_options); +static int __init parse_ivrs_ioapic(char *str) +{ + unsigned int bus, dev, fn; + int ret, id, i; + u16 devid; + + ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); + + if (ret != 4) { + pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str); + return 1; + } + + if (early_ioapic_map_size == EARLY_MAP_SIZE) { + pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", + str); + return 1; + } + + devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + + cmdline_maps = true; + i = early_ioapic_map_size++; + early_ioapic_map[i].id = id; + early_ioapic_map[i].devid = devid; + early_ioapic_map[i].cmd_line = true; + + return 1; +} + +static int __init parse_ivrs_hpet(char *str) +{ + unsigned int bus, dev, fn; + int ret, id, i; + u16 devid; + + ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); + + if (ret != 4) { + pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str); + return 1; + } + + if (early_hpet_map_size == EARLY_MAP_SIZE) { + pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n", + str); + return 1; + } + + devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + + cmdline_maps = true; + i = early_hpet_map_size++; + early_hpet_map[i].id = id; + early_hpet_map[i].devid = devid; + early_hpet_map[i].cmd_line = true; + + return 1; +} + +__setup("amd_iommu_dump", parse_amd_iommu_dump); +__setup("amd_iommu=", parse_amd_iommu_options); +__setup("ivrs_ioapic", parse_ivrs_ioapic); +__setup("ivrs_hpet", parse_ivrs_hpet); IOMMU_INIT_FINISH(amd_iommu_detect, gart_iommu_hole_init, diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index e38ab438bb34..b81153fb9e60 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -99,6 +99,7 @@ #define PASID_MASK 0x000fffff /* MMIO status bits */ +#define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_PPR_INT_MASK (1 << 6) @@ -591,6 +592,7 @@ struct devid_map { struct list_head list; u8 id; u16 devid; + bool cmd_line; }; /* Map HPET and IOAPIC ids to the devid used by the IOMMU */ diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index e5cdaf87822c..a7967ceb79e6 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -129,7 +129,8 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, if 
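The ivrs_ioapic=/ivrs_hpet= overrides added above pack the bus/device/function given on the command line into an IVRS device id. A toy user-space illustration of the encoding (the override value is hypothetical, chosen only to show the arithmetic):

#include <stdio.h>

int main(void)
{
	/* Hypothetical boot parameter: ivrs_ioapic[32]=00:14.0 */
	unsigned int bus = 0x00, dev = 0x14, fn = 0x0;
	unsigned int devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	printf("IOAPIC id 32 -> devid 0x%04x (%02x:%02x.%x)\n",
	       devid, bus, dev, fn);	/* 0x00a0 for 00:14.0 */
	return 0;
}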
(scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) (*cnt)++; - else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) { + else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC && + scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) { pr_warn("Unsupported device scope\n"); } start += scope->length; @@ -645,7 +646,7 @@ out: int alloc_iommu(struct dmar_drhd_unit *drhd) { struct intel_iommu *iommu; - u32 ver; + u32 ver, sts; static int iommu_allocated = 0; int agaw = 0; int msagaw = 0; @@ -695,6 +696,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) (unsigned long long)iommu->cap, (unsigned long long)iommu->ecap); + /* Reflect status in gcmd */ + sts = readl(iommu->reg + DMAR_GSTS_REG); + if (sts & DMA_GSTS_IRES) + iommu->gcmd |= DMA_GCMD_IRE; + if (sts & DMA_GSTS_TES) + iommu->gcmd |= DMA_GCMD_TE; + if (sts & DMA_GSTS_QIES) + iommu->gcmd |= DMA_GCMD_QIE; + raw_spin_lock_init(&iommu->register_lock); drhd->iommu = iommu; @@ -1204,7 +1214,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id) /* TBD: ignore advanced fault log currently */ if (!(fault_status & DMA_FSTS_PPF)) - goto clear_rest; + goto unlock_exit; fault_index = dma_fsts_fault_record_index(fault_status); reg = cap_fault_reg_offset(iommu->cap); @@ -1245,11 +1255,10 @@ irqreturn_t dmar_fault(int irq, void *dev_id) fault_index = 0; raw_spin_lock_irqsave(&iommu->register_lock, flag); } -clear_rest: - /* clear all the other faults */ - fault_status = readl(iommu->reg + DMAR_FSTS_REG); - writel(fault_status, iommu->reg + DMAR_FSTS_REG); + writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG); + +unlock_exit: raw_spin_unlock_irqrestore(&iommu->register_lock, flag); return IRQ_HANDLED; } @@ -1297,6 +1306,7 @@ int __init enable_drhd_fault_handling(void) for_each_drhd_unit(drhd) { int ret; struct intel_iommu *iommu = drhd->iommu; + u32 fault_status; ret = dmar_set_interrupt(iommu); if (ret) { @@ -1309,6 +1319,8 @@ int __init enable_drhd_fault_handling(void) * Clear any previous faults. */ dmar_fault(iommu->irq, iommu); + fault_status = readl(iommu->reg + DMAR_FSTS_REG); + writel(fault_status, iommu->reg + DMAR_FSTS_REG); } return 0; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 6e0b9ffc79b5..b4f0e28dfa41 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -47,6 +47,7 @@ #include <asm/iommu.h> #include "irq_remapping.h" +#include "pci.h" #define ROOT_SIZE VTD_PAGE_SIZE #define CONTEXT_SIZE VTD_PAGE_SIZE @@ -3665,6 +3666,7 @@ static struct notifier_block device_nb = { int __init intel_iommu_init(void) { int ret = 0; + struct dmar_drhd_unit *drhd; /* VT-d is required for a TXT/tboot launch, so enforce that */ force_on = tboot_force_iommu(); @@ -3675,6 +3677,20 @@ int __init intel_iommu_init(void) return -ENODEV; } + /* + * Disable translation if already enabled prior to OS handover. 
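	 * (A kdump/kexec kernel, for example, can inherit an IOMMU whose DMA
	 * remapping is still active with the previous kernel's page tables;
	 * translation is switched off here and re-enabled later once this
	 * kernel has programmed its own tables.)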
+ */ + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu; + + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + if (iommu->gcmd & DMA_GCMD_TE) + iommu_disable_translation(iommu); + } + if (dmar_dev_scope_init() < 0) { if (force_on) panic("tboot: Failed to initialize DMAR device scope\n"); @@ -4137,12 +4153,6 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain, return 0; } -static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) -{ - pci_dev_put(*from); - *from = to; -} - #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) static int intel_iommu_add_device(struct device *dev) diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index f3b8f23b5d8f..5b19b2d6ec2d 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -524,6 +524,16 @@ static int __init intel_irq_remapping_supported(void) if (disable_irq_remap) return 0; + if (irq_remap_broken) { + WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, + "This system BIOS has enabled interrupt remapping\n" + "on a chipset that contains an erratum making that\n" + "feature unstable. To maintain system stability\n" + "interrupt remapping is being disabled. Please\n" + "contact your BIOS vendor for an update\n"); + disable_irq_remap = 1; + return 0; + } if (!dmar_ir_support()) return 0; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 1d72b4f5b006..d8f98b14e2fe 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -204,6 +204,35 @@ again: } EXPORT_SYMBOL_GPL(iommu_group_alloc); +struct iommu_group *iommu_group_get_by_id(int id) +{ + struct kobject *group_kobj; + struct iommu_group *group; + const char *name; + + if (!iommu_group_kset) + return NULL; + + name = kasprintf(GFP_KERNEL, "%d", id); + if (!name) + return NULL; + + group_kobj = kset_find_obj(iommu_group_kset, name); + kfree(name); + + if (!group_kobj) + return NULL; + + group = container_of(group_kobj, struct iommu_group, kobj); + BUG_ON(group->id != id); + + kobject_get(group->devices_kobj); + kobject_put(&group->kobj); + + return group; +} +EXPORT_SYMBOL_GPL(iommu_group_get_by_id); + /** * iommu_group_get_iommudata - retrieve iommu_data registered for a group * @group: the group diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 7c11ff368d07..dcfea4e39be7 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -18,6 +18,7 @@ int irq_remapping_enabled; int disable_irq_remap; +int irq_remap_broken; int disable_sourceid_checking; int no_x2apic_optout; @@ -210,6 +211,11 @@ void __init setup_irq_remapping_ops(void) #endif } +void set_irq_remapping_broken(void) +{ + irq_remap_broken = 1; +} + int irq_remapping_supported(void) { if (disable_irq_remap) diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index ecb637670405..90c4dae5a46b 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h @@ -32,6 +32,7 @@ struct pci_dev; struct msi_msg; extern int disable_irq_remap; +extern int irq_remap_broken; extern int disable_sourceid_checking; extern int no_x2apic_optout; extern int irq_remapping_enabled; @@ -89,6 +90,7 @@ extern struct irq_remap_ops amd_iommu_irq_ops; #define irq_remapping_enabled 0 #define disable_irq_remap 1 +#define irq_remap_broken 0 #endif /* CONFIG_IRQ_REMAP */ diff --git a/drivers/iommu/pci.h b/drivers/iommu/pci.h new file mode 100644 index 000000000000..352d80ae7443 --- /dev/null +++ b/drivers/iommu/pci.h @@ -0,0 +1,29 @@ +/* + 
* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright (C) 2013 Red Hat, Inc. + * Copyright (C) 2013 Freescale Semiconductor, Inc. + * + */ +#ifndef __IOMMU_PCI_H +#define __IOMMU_PCI_H + +/* Helper function for swapping pci device reference */ +static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) +{ + pci_dev_put(*from); + *from = to; +} + +#endif /* __IOMMU_PCI_H */ diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 4aec8be38054..108c0e9c24d9 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -295,7 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, pa = (pte & GART_PAGE_MASK); if (!pfn_valid(__phys_to_pfn(pa))) { - dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa); + dev_err(gart->dev, "No entry for %08llx:%08x\n", + (unsigned long long)iova, pa); gart_dump_table(gart); return -EINVAL; } diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index bc9b59949d09..f6f120e25409 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -772,7 +772,8 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain, pfn = *pte & SMMU_PFN_MASK; WARN_ON(!pfn_valid(pfn)); dev_dbg(as->smmu->dev, - "iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid); + "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova, + pfn, as->asid); spin_unlock_irqrestore(&as->lock, flags); return PFN_PHYS(pfn); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index a32e0d5aa45f..fc6aebf1e4b2 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d) if (gic_arch_extn.irq_retrigger) return gic_arch_extn.irq_retrigger(d); - return -ENXIO; + /* the genirq layer expects 0 if we can't retrigger in hardware */ + return 0; } #ifdef CONFIG_SMP diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 66120bd46d15..10744091e6ca 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -6,6 +6,7 @@ #include "dm.h" #include "dm-bio-prison.h" +#include "dm-bio-record.h" #include "dm-cache-metadata.h" #include <linux/dm-io.h> @@ -201,10 +202,15 @@ struct per_bio_data { unsigned req_nr:2; struct dm_deferred_entry *all_io_entry; - /* writethrough fields */ + /* + * writethrough fields. These MUST remain at the end of this + * structure and the 'cache' member must be the first as it + * is used to determine the offsetof the writethrough fields. 
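The sizing trick this comment refers to is worth spelling out: because the writethrough-only members sit at the tail of struct per_bio_data, offsetof() on the first of them yields the smaller per-bio footprint used in writeback mode. A stand-alone toy illustration (struct and field names abridged; not the real definition):

#include <stddef.h>
#include <stdio.h>

struct pb_example {
	unsigned long tick;
	unsigned req_nr;
	void *all_io_entry;
	/* writethrough-only fields must stay at the end */
	void *cache;			/* first writethrough-only field */
	unsigned long cblock;
};

#define PB_SIZE_WB offsetof(struct pb_example, cache)	/* writeback: tail unused */
#define PB_SIZE_WT sizeof(struct pb_example)		/* writethrough: whole struct */

int main(void)
{
	printf("per-bio data: %zu bytes (writeback) vs %zu bytes (writethrough)\n",
	       (size_t)PB_SIZE_WB, (size_t)PB_SIZE_WT);
	return 0;
}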
+ */ struct cache *cache; dm_cblock_t cblock; bio_end_io_t *saved_bi_end_io; + struct dm_bio_details bio_details; }; struct dm_cache_migration { @@ -513,16 +519,28 @@ static void save_stats(struct cache *cache) /*---------------------------------------------------------------- * Per bio data *--------------------------------------------------------------*/ -static struct per_bio_data *get_per_bio_data(struct bio *bio) + +/* + * If using writeback, leave out struct per_bio_data's writethrough fields. + */ +#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) +#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) + +static size_t get_per_bio_data_size(struct cache *cache) +{ + return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; +} + +static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) { - struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); + struct per_bio_data *pb = dm_per_bio_data(bio, data_size); BUG_ON(!pb); return pb; } -static struct per_bio_data *init_per_bio_data(struct bio *bio) +static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) { - struct per_bio_data *pb = get_per_bio_data(bio); + struct per_bio_data *pb = get_per_bio_data(bio, data_size); pb->tick = false; pb->req_nr = dm_bio_get_target_bio_nr(bio); @@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) { unsigned long flags; - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); spin_lock_irqsave(&cache->lock, flags); if (cache->need_tick_bio && @@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio) static void writethrough_endio(struct bio *bio, int err) { - struct per_bio_data *pb = get_per_bio_data(bio); + struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); bio->bi_end_io = pb->saved_bi_end_io; if (err) { @@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err) return; } + dm_bio_restore(&pb->bio_details, bio); remap_to_cache(pb->cache, bio, pb->cblock); /* @@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err) static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { - struct per_bio_data *pb = get_per_bio_data(bio); + struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); pb->cache = cache; pb->cblock = cblock; pb->saved_bi_end_io = bio->bi_end_io; + dm_bio_record(&pb->bio_details, bio); bio->bi_end_io = writethrough_endio; remap_to_origin_clear_discard(pb->cache, bio, oblock); @@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio) static void process_flush_bio(struct cache *cache, struct bio *bio) { - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); BUG_ON(bio->bi_size); if (!pb->req_nr) @@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, dm_oblock_t block = get_bio_block(cache, bio); struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; struct policy_result lookup_result; - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, 
pb_data_size); bool discarded_block = is_discarded_oblock(cache, block); bool can_migrate = discarded_block || spare_migration_bandwidth(cache); @@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) cache->ti = ca->ti; ti->private = cache; - ti->per_bio_data_size = sizeof(struct per_bio_data); ti->num_flush_bios = 2; ti->flush_supported = true; @@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->discard_zeroes_data_unsupported = true; memcpy(&cache->features, &ca->features, sizeof(cache->features)); + ti->per_bio_data_size = get_per_bio_data_size(cache); cache->callbacks.congested_fn = cache_is_congested; dm_table_add_target_callbacks(ti->table, &cache->callbacks); @@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) int r; dm_oblock_t block = get_bio_block(cache, bio); + size_t pb_data_size = get_per_bio_data_size(cache); bool can_migrate = false; bool discarded_block; struct dm_bio_prison_cell *cell; @@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - pb = init_per_bio_data(bio); + pb = init_per_bio_data(bio, pb_data_size); if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { defer_bio(cache, bio); @@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) { struct cache *cache = ti->private; unsigned long flags; - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); if (pb->tick) { policy_tick(cache->policy); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7e469260fe5e..9a0bdad9ad8f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error) queue_io(md, bio); } else { /* done with normal IO or empty flush */ + trace_block_bio_complete(md->queue, bio, io_error); bio_endio(bio, io_error); } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 24909eb13fec..f4e87bfc7567 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi) return_bi = bi->bi_next; bi->bi_next = NULL; bi->bi_size = 0; + trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), + bi, 0); bio_endio(bi, 0); bi = return_bi; } @@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error) rdev_dec_pending(rdev, conf->mddev); if (!error && uptodate) { + trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), + raid_bi, 0); bio_endio(raid_bi, 0); if (atomic_dec_and_test(&conf->active_aligned_reads)) wake_up(&conf->wait_for_stripe); @@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi) if ( rw == WRITE ) md_write_end(mddev); + trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), + bi, 0); bio_endio(bi, 0); } } @@ -4758,8 +4764,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) handled++; } remaining = raid5_dec_bi_active_stripes(raid_bio); - if (remaining == 0) + if (remaining == 0) { + trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), + raid_bio, 0); bio_endio(raid_bio, 0); + } if (atomic_dec_and_test(&conf->active_aligned_reads)) wake_up(&conf->wait_for_stripe); return handled; diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index f19cd7367040..4faaf8053f26 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ 
b/drivers/media/dvb-frontends/mb86a20s.c @@ -610,7 +610,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer, __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000, rate, rate); - state->estimated_rate[i] = rate; + state->estimated_rate[layer] = rate; } diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c index d4de021dc844..31ce7698acb9 100644 --- a/drivers/media/pci/cx25821/cx25821-video.c +++ b/drivers/media/pci/cx25821/cx25821-video.c @@ -461,7 +461,7 @@ int cx25821_video_register(struct cx25821_dev *dev) spin_lock_init(&dev->slock); - for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) { + for (i = 0; i < VID_CHANNEL_NUM; ++i) { cx25821_init_controls(dev, i); cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper, diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 05d7b6333461..a0639e779973 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC config VIDEO_SH_VEU tristate "SuperH VEU mem2mem video processing driver" - depends on VIDEO_DEV && VIDEO_V4L2 + depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV help diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c index c61f590029ad..348dafc0318a 100644 --- a/drivers/media/radio/radio-ma901.c +++ b/drivers/media/radio/radio-ma901.c @@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev) static int usb_ma901radio_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_device *dev = interface_to_usbdev(intf); struct ma901radio_device *radio; int retval = 0; + /* Masterkit MA901 usb radio has the same USB ID as many others + * Atmel V-USB devices. Let's make additional checks to be sure + * that this is our device. + */ + + if (dev->product && dev->manufacturer && + (strncmp(dev->product, "MA901", 5) != 0 + || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0)) + return -ENODEV; + radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); if (!radio) { dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index 39c2ecadb273..ea98f7e9ccd1 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig @@ -4,7 +4,7 @@ config VMWARE_VMCI tristate "VMware VMCI Driver" - depends on X86 && PCI + depends on X86 && PCI && NET help This is VMware's Virtual Machine Communication Interface. It enables high-speed communication between host and guest in a virtual diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 92ab30ab00dc..dc571ebc1aa0 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file, } #endif -static inline unsigned long get_vm_size(struct vm_area_struct *vma) -{ - return vma->vm_end - vma->vm_start; -} - -static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) -{ - return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; -} - -/* - * Set a new vm offset. - * - * Verify that the incoming offset really works as a page offset, - * and that the offset and size fit in a resource_size_t. 
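For reference, the open-coded checks the mtdchar mmap path is dropping here reduce to two conditions that vm_iomap_memory() now covers internally: the offset must be expressible as a page offset, and offset plus size must not wrap. A hedged userspace sketch of those two checks (PAGE_SHIFT and resource_size_t are stand-ins, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */
typedef uint64_t resource_size_t;	/* stand-in for the kernel typedef */

/*
 * The offset must survive the round trip through a page number, and
 * offset + size must not wrap; vm_iomap_memory() performs equivalent
 * sanity checks internally.
 */
static int mapping_offset_ok(resource_size_t off, resource_size_t size)
{
	uint64_t pgoff = off >> PAGE_SHIFT;

	if (off != (resource_size_t)pgoff << PAGE_SHIFT)
		return 0;
	if (size && off + size - 1 < off)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d\n", mapping_offset_ok(0x1000, 0x2000),
	       mapping_offset_ok(0x1001, 0x2000));	/* prints: 1 0 */
	return 0;
}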
- */ -static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) -{ - pgoff_t pgoff = off >> PAGE_SHIFT; - if (off != (resource_size_t) pgoff << PAGE_SHIFT) - return -EINVAL; - if (off + get_vm_size(vma) - 1 < off) - return -EINVAL; - vma->vm_pgoff = pgoff; - return 0; -} - /* * set up a mapping for shared memory segments */ @@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; struct map_info *map = mtd->priv; - resource_size_t start, off; - unsigned long len, vma_len; /* This is broken because it assumes the MTD device is map-based and that mtd->priv is a valid struct map_info. It should be replaced with something that uses the mtd_get_unmapped_area() operation properly. */ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { - off = get_vm_offset(vma); - start = map->phys; - len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); - start &= PAGE_MASK; - vma_len = get_vm_size(vma); - - /* Overflow in off+len? */ - if (vma_len + off < off) - return -EINVAL; - /* Does it fit in the mapping? */ - if (vma_len + off > len) - return -EINVAL; - - off += start; - /* Did that overflow? */ - if (off < start) - return -EINVAL; - if (set_vm_offset(vma, off) < 0) - return -EINVAL; - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - #ifdef pgprot_noncached - if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) + if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - return 0; + return vm_iomap_memory(vma, map->phys, map->size); } return -ENOSYS; #else diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6bbd90e1123c..dbbea0eec134 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); + netif_addr_lock_bh(bond->dev); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_del(old_active->dev, ha->addr); + netif_addr_unlock_bh(bond->dev); } if (new_active) { @@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); + netif_addr_lock_bh(bond->dev); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_add(new_active->dev, ha->addr); + netif_addr_unlock_bh(bond->dev); } } @@ -1901,11 +1905,29 @@ err_dest_symlinks: bond_destroy_slave_symlinks(bond_dev, slave_dev); err_detach: + if (!USES_PRIMARY(bond->params.mode)) { + netif_addr_lock_bh(bond_dev); + bond_mc_list_flush(bond_dev, slave_dev); + netif_addr_unlock_bh(bond_dev); + } + bond_del_vlans_from_slave(bond, slave_dev); write_lock_bh(&bond->lock); bond_detach_slave(bond, new_slave); + if (bond->primary_slave == new_slave) + bond->primary_slave = NULL; write_unlock_bh(&bond->lock); + if (bond->curr_active_slave == new_slave) { + read_lock(&bond->lock); + write_lock_bh(&bond->curr_slave_lock); + bond_change_active_slave(bond, NULL); + bond_select_active_slave(bond); + write_unlock_bh(&bond->curr_slave_lock); + read_unlock(&bond->lock); + } + slave_disable_netpoll(new_slave); err_close: + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_unset_master: @@ -1976,12 
+1998,11 @@ static int __bond_release_one(struct net_device *bond_dev, return -EINVAL; } + write_unlock_bh(&bond->lock); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. */ netdev_rx_handler_unregister(slave_dev); - write_unlock_bh(&bond->lock); - synchronize_net(); write_lock_bh(&bond->lock); if (!all && !bond->params.fail_over_mac) { @@ -3169,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) { struct slave *slave = bond_slave_get_rtnl(slave_dev); - struct bonding *bond = slave->bond; - struct net_device *bond_dev = slave->bond->dev; + struct bonding *bond; + struct net_device *bond_dev; u32 old_speed; u8 old_duplex; + /* A netdev event can be generated while enslaving a device + * before netdev_rx_handler_register is called in which case + * slave will be NULL + */ + if (!slave) + return NOTIFY_DONE; + bond_dev = slave->bond->dev; + bond = slave->bond; + switch (event) { case NETDEV_UNREGISTER: if (bond->setup_by_slave) @@ -3287,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) */ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) { - struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph; - struct ipv6hdr *ipv6h; + const struct ethhdr *data; + const struct iphdr *iph; + const struct ipv6hdr *ipv6h; u32 v6hash; - __be32 *s, *d; + const __be32 *s, *d; if (skb->protocol == htons(ETH_P_IP) && - skb_network_header_len(skb) >= sizeof(*iph)) { + pskb_network_may_pull(skb, sizeof(*iph))) { iph = ip_hdr(skb); + data = (struct ethhdr *)skb->data; return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ (data->h_dest[5] ^ data->h_source[5])) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - skb_network_header_len(skb) >= sizeof(*ipv6h)) { + pskb_network_may_pull(skb, sizeof(*ipv6h))) { ipv6h = ipv6_hdr(skb); + data = (struct ethhdr *)skb->data; s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); @@ -3319,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) { u32 layer4_xor = 0; - struct iphdr *iph; - struct ipv6hdr *ipv6h; - __be32 *s, *d; - __be16 *layer4hdr; + const struct iphdr *iph; + const struct ipv6hdr *ipv6h; + const __be32 *s, *d; + const __be16 *l4 = NULL; + __be16 _l4[2]; + int noff = skb_network_offset(skb); + int poff; if (skb->protocol == htons(ETH_P_IP) && - skb_network_header_len(skb) >= sizeof(*iph)) { + pskb_may_pull(skb, noff + sizeof(*iph))) { iph = ip_hdr(skb); - if (!ip_is_fragment(iph) && - (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) && - (skb_headlen(skb) - skb_network_offset(skb) >= - iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { - layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + poff = proto_ports_offset(iph->protocol); + + if (!ip_is_fragment(iph) && poff >= 0) { + l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, + sizeof(_l4), &_l4); + if (l4) + layer4_xor = ntohs(l4[0] ^ l4[1]); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - skb_network_header_len(skb) >= sizeof(*ipv6h)) { + pskb_may_pull(skb, noff + sizeof(*ipv6h))) { ipv6h = ipv6_hdr(skb); - if ((ipv6h->nexthdr == IPPROTO_TCP || - ipv6h->nexthdr == IPPROTO_UDP) && - (skb_headlen(skb) - skb_network_offset(skb) >= - 
sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { - layer4hdr = (__be16 *)(ipv6h + 1); - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + poff = proto_ports_offset(ipv6h->nexthdr); + if (poff >= 0) { + l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, + sizeof(_l4), &_l4); + if (l4) + layer4_xor = ntohs(l4[0] ^ l4[1]); } s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; @@ -4847,9 +4882,18 @@ static int __net_init bond_net_init(struct net *net) static void __net_exit bond_net_exit(struct net *net) { struct bond_net *bn = net_generic(net, bond_net_id); + struct bonding *bond, *tmp_bond; + LIST_HEAD(list); bond_destroy_sysfs(bn); bond_destroy_proc_dir(bn); + + /* Kill off any bonds created after unregistering bond rtnl ops */ + rtnl_lock(); + list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) + unregister_netdevice_queue(bond->dev, &list); + unregister_netdevice_many(&list); + rtnl_unlock(); } static struct pernet_operations bond_net_ops = { diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index db103e03ba05..ea7a388f4843 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d, goto out; } if (new_value < 0) { - pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", + pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n", bond->dev->name, new_value, INT_MAX); ret = -EINVAL; goto out; @@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d, pr_info("%s: Setting ARP monitoring interval to %d.\n", bond->dev->name, new_value); bond->params.arp_interval = new_value; - if (bond->params.miimon) { - pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", - bond->dev->name, bond->dev->name); - bond->params.miimon = 0; - } - if (!bond->params.arp_targets[0]) { - pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", - bond->dev->name); + if (new_value) { + if (bond->params.miimon) { + pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", + bond->dev->name, bond->dev->name); + bond->params.miimon = 0; + } + if (!bond->params.arp_targets[0]) + pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", + bond->dev->name); } if (bond->dev->flags & IFF_UP) { /* If the interface is up, we may need to fire off @@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d, * timer will get fired off when the open function * is called. 
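The reworked layer3+4 hash policy above pulls the port pair safely with skb_header_pointer() and then folds it into the address hash as before. Stripped of the skb handling, the arithmetic is an XOR of the two ports with the low 16 bits of the XORed addresses, modulo the slave count; a stand-alone illustration (plain fields instead of the kernel's skb/iphdr accessors):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Plain fields standing in for ip_hdr()/skb_header_pointer() results. */
struct flow {
	uint32_t saddr, daddr;	/* network byte order */
	uint16_t sport, dport;	/* network byte order */
};

static int l34_hash(const struct flow *f, int slave_count)
{
	uint32_t layer4_xor = ntohs(f->sport ^ f->dport);

	return (layer4_xor ^ (ntohl(f->saddr ^ f->daddr) & 0xffff)) % slave_count;
}

int main(void)
{
	struct flow f = {
		.saddr = htonl(0xc0a80001), .daddr = htonl(0xc0a80002),
		.sport = htons(12345),      .dport = htons(80),
	};

	printf("slave index: %d\n", l34_hash(&f, 4));
	return 0;
}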
*/ - cancel_delayed_work_sync(&bond->mii_work); - queue_delayed_work(bond->wq, &bond->arp_work, 0); + if (!new_value) { + cancel_delayed_work_sync(&bond->arp_work); + } else { + cancel_delayed_work_sync(&bond->mii_work); + queue_delayed_work(bond->wq, &bond->arp_work, 0); + } } - out: rtnl_unlock(); return ret; @@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d, } if (new_value < 0) { pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 1, INT_MAX); + bond->dev->name, new_value, 0, INT_MAX); ret = -EINVAL; goto out; } else { @@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d, goto out; } if (new_value < 0) { - pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 1, INT_MAX); + pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", + bond->dev->name, new_value, 0, INT_MAX); ret = -EINVAL; goto out; } else { @@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d, } if (new_value < 0) { pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 1, INT_MAX); + bond->dev->name, new_value, 0, INT_MAX); ret = -EINVAL; goto out; - } else { - pr_info("%s: Setting MII monitoring interval to %d.\n", - bond->dev->name, new_value); - bond->params.miimon = new_value; - if (bond->params.updelay) - pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", - bond->dev->name, - bond->params.updelay * bond->params.miimon); - if (bond->params.downdelay) - pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", - bond->dev->name, - bond->params.downdelay * bond->params.miimon); - if (bond->params.arp_interval) { - pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", - bond->dev->name); - bond->params.arp_interval = 0; - if (bond->params.arp_validate) { - bond->params.arp_validate = - BOND_ARP_VALIDATE_NONE; - } - } - - if (bond->dev->flags & IFF_UP) { - /* If the interface is up, we may need to fire off - * the MII timer. If the interface is down, the - * timer will get fired off when the open function - * is called. - */ + } + pr_info("%s: Setting MII monitoring interval to %d.\n", + bond->dev->name, new_value); + bond->params.miimon = new_value; + if (bond->params.updelay) + pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", + bond->dev->name, + bond->params.updelay * bond->params.miimon); + if (bond->params.downdelay) + pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", + bond->dev->name, + bond->params.downdelay * bond->params.miimon); + if (new_value && bond->params.arp_interval) { + pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", + bond->dev->name); + bond->params.arp_interval = 0; + if (bond->params.arp_validate) + bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; + } + if (bond->dev->flags & IFF_UP) { + /* If the interface is up, we may need to fire off + * the MII timer. If the interface is down, the + * timer will get fired off when the open function + * is called. 
+ */ + if (!new_value) { + cancel_delayed_work_sync(&bond->mii_work); + } else { cancel_delayed_work_sync(&bond->arp_work); queue_delayed_work(bond->wq, &bond->mii_work, 0); } diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index f32b9fc6a983..9aa0c64c33c8 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net) struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; struct mcp251x_platform_data *pdata = spi->dev.platform_data; + unsigned long flags; int ret; ret = open_candev(net); @@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net) priv->tx_skb = NULL; priv->tx_len = 0; + flags = IRQF_ONESHOT; + if (pdata->irq_flags) + flags |= pdata->irq_flags; + else + flags |= IRQF_TRIGGER_FALLING; + ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, - pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, - DEVICE_NAME, priv); + flags, DEVICE_NAME, priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); if (pdata->transceiver_enable) diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index b39ca5b3ea7f..ff2ba86cd4a4 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -46,6 +46,7 @@ config CAN_EMS_PCI config CAN_PEAK_PCMCIA tristate "PEAK PCAN-PC Card" depends on PCMCIA + depends on HAS_IOPORT ---help--- This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) from PEAK-System (http://www.peak-system.com). To compile this diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index a042cdc260dc..3c18d7d000ed 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) */ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == REG_CR_BASICCAN_INITIAL && - (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && + (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) flag = 1; @@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) * See states on p. 23 of the Datasheet. 
*/ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && - priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && + priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) return flag; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index daf4013a8fc7..e4df307eaa90 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) */ spin_lock_irqsave(&priv->cmdreg_lock, flags); priv->write_reg(priv, REG_CMR, val); - priv->read_reg(priv, REG_SR); + priv->read_reg(priv, SJA1000_REG_SR); spin_unlock_irqrestore(&priv->cmdreg_lock, flags); } @@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { n++; - status = priv->read_reg(priv, REG_SR); + status = priv->read_reg(priv, SJA1000_REG_SR); /* check for absent controller due to hw unplug */ if (status == 0xFF && sja1000_is_absent(priv)) return IRQ_NONE; @@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) /* receive interrupt */ while (status & SR_RBS) { sja1000_rx(dev); - status = priv->read_reg(priv, REG_SR); + status = priv->read_reg(priv, SJA1000_REG_SR); /* check for absent controller */ if (status == 0xFF && sja1000_is_absent(priv)) return IRQ_NONE; diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index afa99847a510..aa48e053da27 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h @@ -56,7 +56,7 @@ /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ #define REG_MOD 0x00 #define REG_CMR 0x01 -#define REG_SR 0x02 +#define SJA1000_REG_SR 0x02 #define REG_IR 0x03 #define REG_IER 0x04 #define REG_ALC 0x0B diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 6433b81256cd..8e0c4a001939 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) struct net_device *dev; struct sja1000_priv *priv; struct resource res; - const u32 *prop; - int err, irq, res_size, prop_size; + u32 prop; + int err, irq, res_size; void __iomem *base; err = of_address_to_resource(np, 0, &res); @@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) priv->read_reg = sja1000_ofp_read_reg; priv->write_reg = sja1000_ofp_write_reg; - prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size); - if (prop && (prop_size == sizeof(u32))) - priv->can.clock.freq = *prop / 2; + err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop); + if (!err) + priv->can.clock.freq = prop / 2; else priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ - prop = of_get_property(np, "nxp,tx-output-mode", &prop_size); - if (prop && (prop_size == sizeof(u32))) - priv->ocr |= *prop & OCR_MODE_MASK; + err = of_property_read_u32(np, "nxp,tx-output-mode", &prop); + if (!err) + priv->ocr |= prop & OCR_MODE_MASK; else priv->ocr |= OCR_MODE_NORMAL; /* default */ - prop = of_get_property(np, "nxp,tx-output-config", &prop_size); - if (prop && (prop_size == sizeof(u32))) - priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK; + err = of_property_read_u32(np, "nxp,tx-output-config", &prop); + if (!err) + priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK; else priv->ocr |= 
OCR_TX0_PULLDOWN; /* default */ - prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size); - if (prop && (prop_size == sizeof(u32)) && *prop) { - u32 divider = priv->can.clock.freq * 2 / *prop; + err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop); + if (!err && prop) { + u32 divider = priv->can.clock.freq * 2 / prop; if (divider > 1) priv->cdr |= divider / 2 - 1; @@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) priv->cdr |= CDR_CLK_OFF; /* default */ } - prop = of_get_property(np, "nxp,no-comparator-bypass", NULL); - if (!prop) + if (!of_property_read_bool(np, "nxp,no-comparator-bypass")) priv->cdr |= CDR_CBP; /* default */ priv->irq_flags = IRQF_SHARED; diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index cab306a9888e..e1d26433d619 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev) struct ei_device *ei_local; struct ax_device *ax; struct resource *irq, *mem, *mem2; - resource_size_t mem_size, mem2_size = 0; + unsigned long mem_size, mem2_size = 0; int ret = 0; dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 829b5ad71d0d..b5fd934585e9 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h @@ -186,7 +186,7 @@ struct atl1e_tpd_desc { /* how about 0x2000 */ #define MAX_TX_BUF_LEN 0x2000 #define MAX_TX_BUF_SHIFT 13 -/*#define MAX_TX_BUF_LEN 0x3000 */ +#define MAX_TSO_SEG_SIZE 0x3c00 /* rrs word 1 bit 0:31 */ #define RRS_RX_CSUM_MASK 0xFFFF @@ -438,7 +438,6 @@ struct atl1e_adapter { struct atl1e_hw hw; struct atl1e_hw_stats hw_stats; - bool have_msi; u32 wol; u16 link_speed; u16 link_duplex; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 92f4734f860d..ac25f05ff68f 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter) struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); - - if (adapter->have_msi) - pci_disable_msi(adapter->pdev); } static int atl1e_request_irq(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; - int flags = 0; int err = 0; - adapter->have_msi = true; - err = pci_enable_msi(pdev); - if (err) { - netdev_dbg(netdev, - "Unable to allocate MSI interrupt Error: %d\n", err); - adapter->have_msi = false; - } - - if (!adapter->have_msi) - flags |= IRQF_SHARED; - err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); + err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, + netdev); if (err) { netdev_dbg(adapter->netdev, "Unable to allocate interrupt Error: %d\n", err); - if (adapter->have_msi) - pci_disable_msi(pdev); return err; } netdev_dbg(netdev, "atl1e_request_irq OK\n"); @@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 
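The sja1000_of_platform conversion above replaces of_get_property() plus a manual length check with of_property_read_u32(), which returns 0 only when the property exists and has the expected size. A kernel-context fragment showing the optional-property-with-default pattern (the 16 MHz default here is an illustration, not the driver's SJA1000_OFP_CAN_CLOCK constant):

#include <linux/of.h>

/* Optional DT property with a fallback default, as in the hunk above. */
static u32 sketch_clock_freq(struct device_node *np)
{
	u32 prop;

	if (of_property_read_u32(np, "nxp,external-clock-frequency", &prop))
		return 16000000 / 2;	/* property absent or malformed */

	return prop / 2;
}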
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4046f97378c2..57619dd4a92b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } } + /* initialize FW coalescing state machines in RAM */ + bnx2x_update_coalesce(bp); + /* setup the leading queue */ rc = bnx2x_setup_leading(bp); if (rc) { @@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); - u16 flags = REG_RD16(bp, addr); + u8 flags = REG_RD8(bp, addr); /* clear and set */ flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; - REG_WR16(bp, addr, flags); + REG_WR8(bp, addr, flags); DP(NETIF_MSG_IFUP, "port %x fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 77ebae0ac64a..0283f343b0d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, { struct bnx2x *bp = params->bp; u16 base_page, next_page, not_kr2_device, lane; - int sigdet = bnx2x_warpcore_get_sigdet(phy, params); - - if (!sigdet) { - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) - bnx2x_kr2_recovery(params, vars, phy); - return; - } + int sigdet; /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery * since some switches tend to reinit the AN process and clear the @@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params, vars->check_kr2_recovery_cnt--; return; } + + sigdet = bnx2x_warpcore_get_sigdet(phy, params); + if (!sigdet) { + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + bnx2x_kr2_recovery(params, vars, phy); + DP(NETIF_MSG_LINK, "No sigdet\n"); + } + return; + } + lane = bnx2x_get_warpcore_lane(phy, params); CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, lane); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e81a747ea8ce..c50696b396f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) q); } - if (!NO_FCOE(bp)) { + if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { fp = &bp->fp[FCOE_IDX(bp)]; queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; @@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); } } + if (!CHIP_IS_E1x(bp)) + /* block FW from writing to host */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + /* wait until BRB is empty */ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { @@ -13354,6 +13358,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) RCU_INIT_POINTER(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_mutex); synchronize_rcu(); + bp->cnic_enabled = false; kfree(bp->cnic_kwq); bp->cnic_kwq = NULL; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 67d2663b3974..17a972734ba7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -14604,8 +14604,11 @@ static 
void tg3_read_vpd(struct tg3 *tp) if (j + len > block_end) goto partno; - memcpy(tp->fw_ver, &vpd_data[j], len); - strncat(tp->fw_ver, " bc ", vpdlen - len - 1); + if (len >= sizeof(tp->fw_ver)) + len = sizeof(tp->fw_ver) - 1; + memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); + snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, + &vpd_data[j]); } partno: diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index a170065b5973..b0ebc9f6d55e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -163,6 +163,7 @@ #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ /* XGMAC_INT_STAT reg */ +#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ @@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev) writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + /* Mask power mgt interrupt */ + writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); + /* XGMAC requires AXI bus init. This is a 'magic number' for now */ writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); @@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) struct sk_buff *skb; int frame_len; + if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) + break; + entry = priv->rx_tail; p = priv->dma_rx + entry; if (desc_get_owner(p)) @@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) unsigned int pmt = 0; if (mode & WAKE_MAGIC) - pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; + pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; if (mode & WAKE_UCAST) pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8cdf02503d13..9eada8e86078 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count) tmp = readl(reg); } +/* + * Sleep, either by using msleep() or if we are suspending, then + * use mdelay() to sleep. 
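The tg3_read_vpd() fix above replaces an unbounded memcpy()/strncat() pair with a clamped snprintf() using the %.*s precision form, so the firmware-version string can never overrun tp->fw_ver. A small userspace sketch of the same bounded-copy pattern (buffer name and sizes are illustrative):

#include <stdio.h>
#include <string.h>

/*
 * Clamp the field length to the destination, then let snprintf()
 * handle truncation and NUL-termination.
 */
static void build_fw_ver(char *dst, size_t dst_len, const char *vpd, int len)
{
	if (len >= (int)dst_len)
		len = dst_len - 1;
	memset(dst, 0, dst_len);
	snprintf(dst, dst_len, "%.*s bc ", len, vpd);
}

int main(void)
{
	char fw_ver[16];

	build_fw_ver(fw_ver, sizeof(fw_ver), "5719-v1.24", 10);
	printf("'%s'\n", fw_ver);	/* '5719-v1.24 bc ' */
	return 0;
}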
+ */ +static void dm9000_msleep(board_info_t *db, unsigned int ms) +{ + if (db->in_suspend) + mdelay(ms); + else + msleep(ms); +} + +/* Read a word from phyxcer */ +static int +dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) +{ + board_info_t *db = netdev_priv(dev); + unsigned long flags; + unsigned int reg_save; + int ret; + + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + /* Issue phyxcer read command */ + iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_msleep(db, 1); /* Wait read complete */ + + spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ + + /* The read data keeps on REG_0D & REG_0E */ + ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + mutex_unlock(&db->addr_lock); + + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); + return ret; +} + +/* Write a word to phyxcer */ +static void +dm9000_phy_write(struct net_device *dev, + int phyaddr_unused, int reg, int value) +{ + board_info_t *db = netdev_priv(dev); + unsigned long flags; + unsigned long reg_save; + + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + /* Fill the written data into REG_0D & REG_0E */ + iow(db, DM9000_EPDRL, value); + iow(db, DM9000_EPDRH, value >> 8); + + /* Issue phyxcer write command */ + iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_msleep(db, 1); /* Wait write complete */ + + spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + + spin_unlock_irqrestore(&db->lock, flags); + mutex_unlock(&db->addr_lock); +} + /* dm9000_set_io * * select the specified set of io routines to use with the @@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev) iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ + dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ + dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ + ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; /* if wol is needed, then always set NCR_WAKEEN otherwise we end @@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev) return 0; } -/* - * Sleep, either by using msleep() or if we are suspending, then - * use mdelay() to sleep. 
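The dm9000_phy_read()/dm9000_phy_write() helpers moved above follow the usual indirect-access dance on this chip: select the PHY register through EPAR, kick the EEPROM/PHY control register, wait, then collect the result from the two data registers. A toy, compilable sketch of that sequence (register offsets and command bits are illustrative and should be checked against the DM9000 datasheet; the real helpers also save and restore the index register under a spinlock):

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the DM9000's index/data window. */
static uint8_t regs[0x100];
static uint8_t ior(uint8_t reg)               { return regs[reg]; }
static void    iow(uint8_t reg, uint8_t val)  { regs[reg] = val; }

/* Illustrative offsets and bits, not copied from the driver header. */
#define DM9000_EPCR	0x0B
#define DM9000_EPAR	0x0C
#define DM9000_EPDRL	0x0D
#define DM9000_EPDRH	0x0E
#define DM9000_PHY	0x40
#define EPCR_EPOS	0x08
#define EPCR_ERPRR	0x04

/*
 * Indirect PHY read: point EPAR at the register, start the read via
 * EPCR, wait for the engine, clear the command, then fetch the 16-bit
 * result from EPDRH/EPDRL.
 */
static uint16_t phy_read(uint8_t reg)
{
	iow(DM9000_EPAR, DM9000_PHY | reg);
	iow(DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
	/* driver: dm9000_msleep(db, 1) here while the access completes */
	iow(DM9000_EPCR, 0x0);
	return (ior(DM9000_EPDRH) << 8) | ior(DM9000_EPDRL);
}

int main(void)
{
	regs[DM9000_EPDRH] = 0x12;	/* pretend the PHY returned 0x1234 */
	regs[DM9000_EPDRL] = 0x34;
	printf("0x%04x\n", phy_read(0));
	return 0;
}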
- */ -static void dm9000_msleep(board_info_t *db, unsigned int ms) -{ - if (db->in_suspend) - mdelay(ms); - else - msleep(ms); -} - -/* - * Read a word from phyxcer - */ -static int -dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) -{ - board_info_t *db = netdev_priv(dev); - unsigned long flags; - unsigned int reg_save; - int ret; - - mutex_lock(&db->addr_lock); - - spin_lock_irqsave(&db->lock,flags); - - /* Save previous register address */ - reg_save = readb(db->io_addr); - - /* Fill the phyxcer register into REG_0C */ - iow(db, DM9000_EPAR, DM9000_PHY | reg); - - iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ - - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock,flags); - - dm9000_msleep(db, 1); /* Wait read complete */ - - spin_lock_irqsave(&db->lock,flags); - reg_save = readb(db->io_addr); - - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ - - /* The read data keeps on REG_0D & REG_0E */ - ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); - - /* restore the previous address */ - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock,flags); - - mutex_unlock(&db->addr_lock); - - dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); - return ret; -} - -/* - * Write a word to phyxcer - */ -static void -dm9000_phy_write(struct net_device *dev, - int phyaddr_unused, int reg, int value) -{ - board_info_t *db = netdev_priv(dev); - unsigned long flags; - unsigned long reg_save; - - dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); - mutex_lock(&db->addr_lock); - - spin_lock_irqsave(&db->lock,flags); - - /* Save previous register address */ - reg_save = readb(db->io_addr); - - /* Fill the phyxcer register into REG_0C */ - iow(db, DM9000_EPAR, DM9000_PHY | reg); - - /* Fill the written data into REG_0D & REG_0E */ - iow(db, DM9000_EPDRL, value); - iow(db, DM9000_EPDRH, value >> 8); - - iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ - - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock, flags); - - dm9000_msleep(db, 1); /* Wait write complete */ - - spin_lock_irqsave(&db->lock,flags); - reg_save = readb(db->io_addr); - - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ - - /* restore the previous address */ - writeb(reg_save, db->io_addr); - - spin_unlock_irqrestore(&db->lock, flags); - mutex_unlock(&db->addr_lock); -} - static void dm9000_shutdown(struct net_device *dev) { @@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev) db->flags |= DM9000_PLATF_SIMPLE_PHY; #endif - dm9000_reset(db); + /* Fixing bug on dm9000_probe, takeover dm9000_reset(db), + * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo + * while probe stage. + */ + + iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST); /* try multiple times, DM9000 sometimes gets the read wrong */ for (i = 0; i < 8; i++) { diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h index 55688bd1a3ef..9ce058adabab 100644 --- a/drivers/net/ethernet/davicom/dm9000.h +++ b/drivers/net/ethernet/davicom/dm9000.h @@ -69,7 +69,9 @@ #define NCR_WAKEEN (1<<6) #define NCR_FCOL (1<<4) #define NCR_FDX (1<<3) -#define NCR_LBK (3<<1) + +#define NCR_RESERVED (3<<1) +#define NCR_MAC_LBK (1<<1) #define NCR_RST (1<<0) #define NSR_SPEED (1<<7) @@ -167,5 +169,12 @@ #define ISR_LNKCHNG (1<<5) #define ISR_UNDERRUN (1<<4) +/* Davicom MII registers. 
+ */ + +#define MII_DM_DSPCR 0x1b /* DSP Control Register */ + +#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ + #endif /* _DM9000X_H_ */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08e54f3d288b..2886c9b63f90 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (vlan_tx_tag_present(skb)) { vlan_tag = be_get_tx_vlan_tag(adapter, skb); - __vlan_put_tag(skb, vlan_tag); - skb->vlan_tci = 0; + skb = __vlan_put_tag(skb, vlan_tag); + if (skb) + skb->vlan_tci = 0; } return skb; diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 911d0253dbb2..73195f643c9c 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +/* Init RX & TX buffer descriptors + */ +static void fec_enet_bd_init(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct bufdesc *bdp; + unsigned int i; + + /* Initialize the receive buffer descriptors. */ + bdp = fep->rx_bd_base; + for (i = 0; i < RX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp->cbd_sc |= BD_SC_WRAP; + + fep->cur_rx = fep->rx_bd_base; + + /* ...and the same for transmit */ + bdp = fep->tx_bd_base; + fep->cur_tx = bdp; + for (i = 0; i < TX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. */ + bdp->cbd_sc = 0; + if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { + dev_kfree_skb_any(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp->cbd_sc |= BD_SC_WRAP; + fep->dirty_tx = bdp; +} + /* This function is called to start or restart the FEC during a link * change. This only happens when switching between half and full * duplex. @@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex) /* Set maximum receive buffer size. */ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); + fec_enet_bd_init(ndev); + /* Set receive and transmit descriptor base. */ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); if (fep->bufdesc_ex) @@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex) writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); - fep->cur_rx = fep->rx_bd_base; for (i = 0; i <= TX_RING_MOD_MASK; i++) { if (fep->tx_skbuff[i]) { @@ -954,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) } else { if (fep->link) { fec_stop(ndev); + fep->link = phy_dev->link; status_change = 1; } } @@ -1597,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc *cbd_base; - struct bufdesc *bdp; - unsigned int i; /* Allocate memory for buffer descriptors. 
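The new fec_enet_bd_init() above rebuilds both descriptor rings on every restart; the essential shape is to clear each descriptor and then set the wrap bit on the last one so the DMA engine cycles back to the head. A stripped-down, compilable sketch of that ring setup (the descriptor layout and the BD_SC_WRAP value are assumptions for illustration; the real function additionally marks populated RX descriptors empty and frees stale TX skbs):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	8
#define BD_SC_WRAP	0x2000	/* assumption: wrap flag, for illustration */

struct bufdesc {
	uint16_t cbd_sc;	/* control/status */
	uint16_t cbd_datlen;
	uint32_t cbd_bufaddr;
};

/* Clear every descriptor, then mark the last one so the ring wraps. */
static void bd_ring_init(struct bufdesc *ring, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i].cbd_sc = 0;
		ring[i].cbd_datlen = 0;
		ring[i].cbd_bufaddr = 0;
	}
	ring[n - 1].cbd_sc |= BD_SC_WRAP;
}

int main(void)
{
	struct bufdesc ring[RING_SIZE];

	bd_ring_init(ring, RING_SIZE);
	printf("last cbd_sc = 0x%04x\n", ring[RING_SIZE - 1].cbd_sc);
	return 0;
}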
*/ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, @@ -1608,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev) return -ENOMEM; } + memset(cbd_base, 0, PAGE_SIZE); spin_lock_init(&fep->hw_lock); fep->netdev = ndev; @@ -1631,35 +1679,6 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); - /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); - } - - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); - bdp->cbd_sc |= BD_SC_WRAP; - - /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - fep->cur_tx = bdp; - for (i = 0; i < TX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); - } - - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); - bdp->cbd_sc |= BD_SC_WRAP; - fep->dirty_tx = bdp; - fec_restart(ndev, 0); return 0; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ec800b093e7e..d2bea3f07c73 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -870,7 +870,7 @@ err_unlock: } static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, - void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) { struct cb *cb; unsigned long flags; @@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, nic->cbs_avail--; cb->skb = skb; + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + if (unlikely(!nic->cbs_avail)) err = -ENOSPC; - cb_prepare(nic, cb, skb); /* Order is important otherwise we'll be in a race with h/w: * set S-bit in current first, then clear S-bit in previous. */ @@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic) nic->mii.mdio_write = mdio_write; } -static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) { struct config *config = &cb->u.config; u8 *c = (u8 *)config; @@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); + return 0; } /************************************************************************* @@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic) return fw; } -static void e100_setup_ucode(struct nic *nic, struct cb *cb, +static int e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) { const struct firmware *fw = (void *)skb; @@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 
0xFFFF : 0xFF80); cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; } static inline int e100_load_ucode_wait(struct nic *nic) @@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic) return err; } -static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, struct sk_buff *skb) { cb->command = cpu_to_le16(cb_iaaddr); memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; } -static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) { cb->command = cpu_to_le16(cb_dump); cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + offsetof(struct mem, dump_buf)); + return 0; } static int e100_phy_check_without_mii(struct nic *nic) @@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic) return 0; } -static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) { struct net_device *netdev = nic->netdev; struct netdev_hw_addr *ha; @@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, ETH_ALEN); } + return 0; } static void e100_set_multicast_list(struct net_device *netdev) @@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data) round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); } -static void e100_xmit_prepare(struct nic *nic, struct cb *cb, +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, struct sk_buff *skb) { + dma_addr_t dma_addr; cb->command = nic->tx_command; + dma_addr = pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (pci_dma_mapping_error(nic->pdev, dma_addr)) + return -ENOMEM; + /* * Use the last 4 bytes of the SKB payload packet as the CRC, used for * testing, ie sending frames with bad CRC. @@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb, cb->u.tcb.tcb_byte_count = 0; cb->u.tcb.threshold = nic->tx_threshold; cb->u.tcb.tbd_count = 1; - cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, - skb->data, skb->len, PCI_DMA_TODEVICE)); - /* check for mapping failure? 
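The e100 change above, like the e1000/e1000e/ixgb hunks later in this series, applies one guard consistently: map the buffer first, test the handle with dma_mapping_error(), and only publish it to a hardware descriptor once it is known good, returning an error so the stack can retry the packet. A kernel-context sketch of that pattern (the tx_slot type and the surrounding error handling are placeholders, not the driver's structures):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Placeholder slot; the real drivers fill their own descriptor formats. */
struct tx_slot {
	dma_addr_t dma;
	unsigned int len;
};

static int map_tx_skb(struct device *dev, struct sk_buff *skb,
		      struct tx_slot *slot)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* let the upper layer retry later */

	/* Only a known-good handle ever reaches the hardware descriptor. */
	slot->dma = addr;
	slot->len = skb->len;
	return 0;
}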
*/ + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); cb->u.tcb.tbd.size = cpu_to_le16(skb->len); skb_tx_timestamp(skb); + return 0; } static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 43462d596a4e..ffd287196bf8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) txdr->buffer_info[i].dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); tx_desc->lower.data = cpu_to_le32(skb->len); tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | @@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), GFP_KERNEL); if (!rxdr->buffer_info) { - ret_val = 4; + ret_val = 5; goto err_nomem; } @@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, GFP_KERNEL); if (!rxdr->desc) { - ret_val = 5; + ret_val = 6; goto err_nomem; } memset(rxdr->desc, 0, rxdr->size); @@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); if (!skb) { - ret_val = 6; + ret_val = 7; goto err_nomem; } skb_reserve(skb, NET_IP_ALIGN); @@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rxdr->buffer_info[i].dma = dma_map_single(&pdev->dev, skb->data, E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); memset(skb->data, 0x00, skb->len); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 948b86ffa4f0..7e615e2bf7e6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -848,11 +848,16 @@ check_page: } } - if (!buffer_info->dma) + if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, buffer_info->page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } + } rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 25151401c2ab..ab577a763a20 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -284,18 +284,10 @@ struct igb_q_vector { enum e1000_ring_flags_t { IGB_RING_FLAG_RX_SCTP_CSUM, IGB_RING_FLAG_RX_LB_VLAN_BSWAP, - IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, IGB_RING_FLAG_TX_CTX_IDX, IGB_RING_FLAG_TX_DETECT_HANG }; -#define ring_uses_build_skb(ring) \ - test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) -#define set_ring_build_skb_enabled(ring) \ - set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) -#define clear_ring_build_skb_enabled(ring) \ - clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) - #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) #define IGB_RX_DESC(R, i) \ diff --git 
a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8496adfc6a68..64f75291e3a5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, wr32(E1000_RXDCTL(reg_idx), rxdctl); } -static void igb_set_rx_buffer_len(struct igb_adapter *adapter, - struct igb_ring *rx_ring) -{ -#define IGB_MAX_BUILD_SKB_SIZE \ - (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \ - (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN)) - - /* set build_skb flag */ - if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE) - set_ring_build_skb_enabled(rx_ring); - else - clear_ring_build_skb_enabled(rx_ring); -} - /** * igb_configure_rx - Configure receive Unit after Reset * @adapter: board private structure @@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter) /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igb_ring *rx_ring = adapter->rx_ring[i]; - igb_set_rx_buffer_len(adapter, rx_ring); - igb_configure_rx_ring(adapter, rx_ring); - } + for (i = 0; i < adapter->num_rx_queues; i++) + igb_configure_rx_ring(adapter, adapter->rx_ring[i]); } /** @@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, return igb_can_reuse_rx_page(rx_buffer, page, truesize); } -static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc) -{ - struct igb_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; - void *page_addr; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); -#if (PAGE_SIZE < 8192) - unsigned int truesize = IGB_RX_BUFSZ; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(NET_SKB_PAD + - NET_IP_ALIGN + - size); -#endif - - /* If we spanned a buffer we have a huge mess so test for it */ - BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); - - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); - - page_addr = page_address(page) + rx_buffer->page_offset; - - /* prefetch first cache line of first page */ - prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN); -#endif - - /* build an skb to around the page buffer */ - skb = build_skb(page_addr, truesize); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_failed++; - return NULL; - } - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - IGB_RX_BUFSZ, - DMA_FROM_DEVICE); - - /* update pointers within the skb to store the data */ - skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); - __skb_put(skb, size); - - /* pull timestamp out of packet data */ - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); - __skb_pull(skb, IGB_TS_HDR_LEN); - } - - if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) { - /* hand second half of page back to the ring */ - igb_reuse_rx_page(rx_ring, rx_buffer); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - PAGE_SIZE, DMA_FROM_DEVICE); - } - - /* clear contents of buffer_info */ - rx_buffer->dma = 0; - rx_buffer->page = NULL; - - return skb; -} - static struct sk_buff 
*igb_fetch_rx_buffer(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) rmb(); /* retrieve a buffer from the ring */ - if (ring_uses_build_skb(rx_ring)) - skb = igb_build_rx_buffer(rx_ring, rx_desc); - else - skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); + skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); /* exit if we failed to retrieve a buffer */ if (!skb) @@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; } -static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) -{ - if (ring_uses_build_skb(rx_ring)) - return NET_SKB_PAD + NET_IP_ALIGN; - else - return 0; -} - /** * igb_alloc_rx_buffers - Replace used receive buffers; packet split * @adapter: address of board private structure @@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + - bi->page_offset + - igb_rx_offset(rx_ring)); + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc++; bi++; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index ea4808373435..b5f94abe3cff 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -2159,6 +2159,10 @@ map_skb: skb->data, adapter->rx_buffer_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } rx_desc = IXGB_RX_DESC(*rx_ring, i); rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); @@ -2168,7 +2172,8 @@ map_skb: rx_desc->status = 0; - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; buffer_info = &rx_ring->buffer_info[i]; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407e..79f4a26ea6cc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void) ixgbe_dbg_init(); #endif /* CONFIG_DEBUG_FS */ + ret = pci_register_driver(&ixgbe_driver); + if (ret) { +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_exit(); +#endif /* CONFIG_DEBUG_FS */ + return ret; + } + #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif - ret = pci_register_driver(&ixgbe_driver); - return ret; + return 0; } module_init(ixgbe_init_module); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d44b4d21268c..97e33669c0b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; if (vlan || qos) { + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, + vf); + if (err) + goto out; err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); if (err) goto out; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index edfba9370922..434e33c527df 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -33,6 +33,7 @@ config MV643XX_ETH config 
MVMDIO tristate "Marvell MDIO interface support" + select PHYLIB ---help--- This driver supports the MDIO interface found in the network interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, @@ -45,7 +46,6 @@ config MVMDIO config MVNETA tristate "Marvell Armada 370/XP network interface support" depends on MACH_ARMADA_370_XP - select PHYLIB select MVMDIO ---help--- This driver supports the network interface units in the diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969bc..a47a097c21e1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -374,7 +374,6 @@ static int rxq_number = 8; static int txq_number = 8; static int rxq_def; -static int txq_def; #define MVNETA_DRIVER_NAME "mvneta" #define MVNETA_DRIVER_VERSION "1.0" @@ -1475,7 +1474,8 @@ error: static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; + u16 txq_id = skb_get_queue_mapping(skb); + struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; struct mvneta_tx_desc *tx_desc; struct netdev_queue *nq; int frags = 0; @@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) goto out; frags = skb_shinfo(skb)->nr_frags + 1; - nq = netdev_get_tx_queue(dev, txq_def); + nq = netdev_get_tx_queue(dev, txq_id); /* Get a descriptor for the first part of the packet */ tx_desc = mvneta_txq_next_desc_get(txq); @@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev) return -EINVAL; } - dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); + dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); if (!dev) return -ENOMEM; @@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev) netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; + dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->priv_flags |= IFF_UNICAST_FLT; + err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register\n"); goto err_deinit; } - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; - dev->priv_flags |= IFF_UNICAST_FLT; - netdev_info(dev, "mac: %pM\n", dev->dev_addr); platform_set_drvdata(pdev, pp->dev); @@ -2843,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO); module_param(txq_number, int, S_IRUGO); module_param(rxq_def, int, S_IRUGO); -module_param(txq_def, int, S_IRUGO); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fc07ca35721b..6a0e671fcecd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); - tp = space - 2048/8; + tp = space - 8192/8; sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); } else { diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index 615ac63ea860..ec6dcd80152b 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2074,7 +2074,7 @@ enum { GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ -#define GMAC_DEF_MSK GM_IS_TX_FF_UR +#define 
GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) }; /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f278b10ef714..30d78f806dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) { - unsigned int i; - for (i = ETH_ALEN - 1; i; --i) { + int i; + for (i = ETH_ALEN - 1; i >= 0; --i) { dst_mac[i] = src_mac & 0xff; src_mac >>= 8; } diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 33bcb63d56a2..8fb481252e2c 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) for (; rxfc != 0; rxfc--) { rxh = ks8851_rdreg32(ks, KS_RXFHSR); rxstat = rxh & 0xffff; - rxlen = rxh >> 16; + rxlen = (rxh >> 16) & 0xfff; netif_dbg(ks, rx_status, ks->netdev, "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb3..edd63f1230f3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) } } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); + /* Make sure carrier is off and queue is stopped during loopback */ + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + ret = qlcnic_do_lb_test(adapter, mode); qlcnic_83xx_clear_lb_mode(adapter, mode); @@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) { struct qlcnic_cmd_args cmd; + struct net_device *netdev = adapter->netdev; int ret = 0; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); @@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_TX, &ret); if (ret) { - dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); + netdev_err(netdev, "Error getting Tx stats\n"); goto out; } /* Get MAC stats */ @@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_MAC, &ret); if (ret) { - dev_info(&adapter->pdev->dev, - "Error getting Rx stats\n"); + netdev_err(netdev, "Error getting MAC stats\n"); goto out; } /* Get Rx stats */ @@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_RX, &ret); if (ret) - dev_info(&adapter->pdev->dev, - "Error getting Tx stats\n"); + netdev_err(netdev, "Error getting Rx stats\n"); out: qlcnic_free_mbx_args(&cmd); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0e630061bff3..5fa847fe388a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -358,8 +358,7 @@ set_flags: memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); } opcode = TX_ETHER_PKT; - if 
((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && - skb_shinfo(skb)->gso_size > 0) { + if (skb_is_gso(skb)) { hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); first_desc->total_hdr_length = hdr_len; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 987fb6f8adc3..5ef328af61d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -200,10 +200,10 @@ beacon_err: } err = qlcnic_config_led(adapter, b_state, b_rate); - if (!err) + if (!err) { err = len; - else ahw->beacon_state = b_state; + } if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(adapter->netdev, max_sds_rings); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index a131d7b5d2fe..7e8d68263963 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -18,7 +18,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "v1.00.00.31" +#define DRV_VERSION "v1.00.00.32" #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 6f316ab23257..0780e039b271 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev, ecmd->supported = SUPPORTED_10000baseT_Full; ecmd->advertising = ADVERTISED_10000baseT_Full; - ecmd->autoneg = AUTONEG_ENABLE; ecmd->transceiver = XCVR_EXTERNAL; if ((qdev->link_status & STS_LINK_TYPE_MASK) == STS_LINK_TYPE_10GBASET) { ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); ecmd->port = PORT_TP; + ecmd->autoneg = AUTONEG_ENABLE; } else { ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7eb..8033555e53c2 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1434,11 +1434,13 @@ map_error: } /* Categorizing receive firmware frame errors */ -static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) +static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, + struct rx_ring *rx_ring) { struct nic_stats *stats = &qdev->nic_stats; stats->rx_err_count++; + rx_ring->rx_errors++; switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { case IB_MAC_IOCB_RSP_ERR_CODE_ERR: @@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + put_page(lbq_desc->p.pg_chunk.page); + return; + } napi->dev = qdev->ndev; skb = napi_get_frags(napi); @@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, addr = lbq_desc->p.pg_chunk.va; prefetch(addr); + /* Frame error, so drop the packet. 
*/ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + goto err_out; + } + /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ @@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, memcpy(skb_put(new_skb, length), skb->data, length); skb = new_skb; + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + /* loopback self test for ethtool */ if (test_bit(QL_SELFTEST, &qdev->flags)) { ql_check_lb_frame(qdev, skb); @@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, return; } + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ @@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, QL_DUMP_IB_MAC_RSP(ib_mac_rsp); - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2); - return (unsigned long)length; - } - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { /* The data and headers are split into * separate buffers. diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 28fb50a1e9c3..4ecbe64a758d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) } } +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? 
+ ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; @@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) return false; - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - + rtl_speed_down(tp); rtl_wol_suspend_quirk(tp); return true; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bf5e3cf97c4d..6ed333fe5c04 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) if (felic_stat & ECSR_LCHNG) { /* Link Changed */ if (mdp->cd->no_psr || mdp->no_ether_link) { - if (mdp->link == PHY_DOWN) - link_stat = 0; - else - link_stat = PHY_ST_LINK; + goto ignore_link; } else { link_stat = (sh_eth_read(ndev, PSR)); if (mdp->ether_link_active_low) @@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) } } +ignore_link: if (intr_status & EESR_TWB) { /* Write buck end. unused write back interrupt */ if (intr_status & EESR_TABT) /* Transmit Abort int */ @@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_cpu_data *cd = mdp->cd; irqreturn_t ret = IRQ_NONE; - u32 intr_status = 0; + unsigned long intr_status; spin_lock(&mdp->lock); - /* Get interrpt stat */ + /* Get interrupt status */ intr_status = sh_eth_read(ndev, EESR); + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_error() in order to quench + * it as it doesn't get cleared by just writing 1 to the ECI bit... 
+ */ + intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; /* Clear interrupt */ if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | @@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) struct phy_device *phydev = mdp->phydev; int new_state = 0; - if (phydev->link != PHY_DOWN) { + if (phydev->link) { if (phydev->duplex != mdp->duplex) { new_state = 1; mdp->duplex = phydev->duplex; @@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) if (mdp->cd->set_rate) mdp->cd->set_rate(ndev); } - if (mdp->link == PHY_DOWN) { + if (!mdp->link) { sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); new_state = 1; mdp->link = phydev->link; + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_enable(ndev); } } else if (mdp->link) { new_state = 1; - mdp->link = PHY_DOWN; + mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); } if (new_state && netif_msg_link(mdp)) @@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev) snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, mdp->mii_bus->id , mdp->phy_id); - mdp->link = PHY_DOWN; + mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index e6655678458e..828be4515008 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -723,7 +723,7 @@ struct sh_eth_private { u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ struct phy_device *phydev; /* PHY device control */ - enum phy_state link; + int link; phy_interface_t phy_interface; int msg_enable; int speed; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 0c74a702d461..50617c5a0bdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) { writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); } /* This reads the MAC core counters (if actaully supported). 
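
The sh_eth hunk above folds the enabled-interrupt mask into the raw status word so the handler only acts on sources it asked for, while forcing the ECI bit on because that interrupt arrives regardless of EESIPR and must be handled to be quenched. A minimal stand-alone sketch of that masking pattern follows; the register values and the ECI bit position are made up for illustration, only the shape of the expression mirrors the driver.

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit position only; the driver's real DMAC_M_ECI define
 * lives in sh_eth.h and may differ. */
#define DEMO_ECI_BIT (1u << 22)

/* Stand-ins for sh_eth_read(ndev, EESR) / sh_eth_read(ndev, EESIPR). */
static uint32_t read_status(void)  { return 0x00400005u; } /* raw EESR    */
static uint32_t read_enabled(void) { return 0x00000004u; } /* EESIPR mask */

int main(void)
{
	uint32_t intr_status = read_status();

	/* Keep only the sources that were enabled, but always let the
	 * ECI (link change) bit through: it is reported regardless of
	 * the mask, as the comment in the hunk above explains. */
	intr_status &= read_enabled() | DEMO_ECI_BIT;

	printf("status to handle: 0x%08x\n", intr_status);
	return 0;
}
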
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index df32a090d08e..4781d3d8e182 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status) * queue is stopped then start the queue as we have free desc for tx */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); cpts_tx_timestamp(priv->cpts, skb); priv->stats.tx_packets++; priv->stats.tx_bytes += len; @@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); if (data->dual_emac) { - if (of_property_read_u32(node, "dual_emac_res_vlan", + if (of_property_read_u32(slave_node, "dual_emac_res_vlan", &prop)) { pr_err("Missing dual_emac_res_vlan in DT.\n"); slave_data->dual_emac_res_vlan = i+1; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ae1b77aa199f..72300bc9e378 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status) * queue is stopped then start the queue as we have free desc for tx */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1cd77483da50..f5f0f09e4cc5 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device, packet->trans_id; /* Notify the layer above us */ - nvsc_packet->completion.send.send_completion( - nvsc_packet->completion.send.send_completion_ctx); + if (nvsc_packet) + nvsc_packet->completion.send.send_completion( + nvsc_packet->completion.send. 
+ send_completion_ctx); num_outstanding_sends = atomic_dec_return(&net_device->num_outstanding_sends); @@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device, int ret = 0; struct nvsp_message sendMessage; struct net_device *ndev; + u64 req_id; net_device = get_outbound_net_device(device); if (!net_device) @@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device, 0xFFFFFFFF; sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; + if (packet->completion.send.send_completion) + req_id = (u64)packet; + else + req_id = 0; + if (packet->page_buf_cnt) { ret = vmbus_sendpacket_pagebuffer(device->channel, packet->page_buf, packet->page_buf_cnt, &sendMessage, sizeof(struct nvsp_message), - (unsigned long)packet); + req_id); } else { ret = vmbus_sendpacket(device->channel, &sendMessage, sizeof(struct nvsp_message), - (unsigned long)packet, + req_id, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - } if (ret == 0) { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f85205cd12b..8341b62e5521 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, if (status == 1) { netif_carrier_on(net); - netif_wake_queue(net); ndev_ctx = netdev_priv(net); schedule_delayed_work(&ndev_ctx->dwork, 0); schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); } else { netif_carrier_off(net); - netif_tx_disable(net); } } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b657d4d63a8..0775f0aefd1e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -61,9 +61,6 @@ struct rndis_request { static void rndis_filter_send_completion(void *ctx); -static void rndis_filter_send_request_completion(void *ctx); - - static struct rndis_device *get_rndis_device(void) { @@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, packet->page_buf[0].len; } - packet->completion.send.send_completion_ctx = req;/* packet; */ - packet->completion.send.send_completion = - rndis_filter_send_request_completion; - packet->completion.send.send_completion_tid = (unsigned long)dev; + packet->completion.send.send_completion = NULL; ret = netvsc_send(dev->net_dev->dev, packet); return ret; @@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx) /* Pass it back to the original handler */ filter_pkt->completion(filter_pkt->completion_ctx); } - - -static void rndis_filter_send_request_completion(void *ctx) -{ - /* Noop */ -} diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b7c457adc0dc..729ed533bb33 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (tun->flags & TUN_TAP_MQ && (tun->numqueues + tun->numdisabled > 1)) - return err; + return -EBUSY; } else { char *name; diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 16c842997291..6bd91676d2cb 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb goto error; if (skb) { - if (skb->len <= sizeof(ETH_HLEN)) + if (skb->len <= ETH_HLEN) goto error; /* mapping VLANs to MBIM sessions: diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 968d5d50751d..2a3579f67910 100644 --- a/drivers/net/usb/qmi_wwan.c +++ 
b/drivers/net/usb/qmi_wwan.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> +#include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> @@ -52,6 +53,96 @@ struct qmi_wwan_state { struct usb_interface *data; }; +/* default ethernet address used by the modem */ +static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; + +/* Make up an ethernet header if the packet doesn't have one. + * + * A firmware bug common among several devices cause them to send raw + * IP packets under some circumstances. There is no way for the + * driver/host to know when this will happen. And even when the bug + * hits, some packets will still arrive with an intact header. + * + * The supported devices are only capably of sending IPv4, IPv6 and + * ARP packets on a point-to-point link. Any packet with an ethernet + * header will have either our address or a broadcast/multicast + * address as destination. ARP packets will always have a header. + * + * This means that this function will reliably add the appropriate + * header iff necessary, provided our hardware address does not start + * with 4 or 6. + * + * Another common firmware bug results in all packets being addressed + * to 00:a0:c6:00:00:00 despite the host address being different. + * This function will also fixup such packets. + */ +static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + __be16 proto; + + /* usbnet rx_complete guarantees that skb->len is at least + * hard_header_len, so we can inspect the dest address without + * checking skb->len + */ + switch (skb->data[0] & 0xf0) { + case 0x40: + proto = htons(ETH_P_IP); + break; + case 0x60: + proto = htons(ETH_P_IPV6); + break; + case 0x00: + if (is_multicast_ether_addr(skb->data)) + return 1; + /* possibly bogus destination - rewrite just in case */ + skb_reset_mac_header(skb); + goto fix_dest; + default: + /* pass along other packets without modifications */ + return 1; + } + if (skb_headroom(skb) < ETH_HLEN) + return 0; + skb_push(skb, ETH_HLEN); + skb_reset_mac_header(skb); + eth_hdr(skb)->h_proto = proto; + memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); +fix_dest: + memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); + return 1; +} + +/* very simplistic detection of IPv4 or IPv6 headers */ +static bool possibly_iphdr(const char *data) +{ + return (data[0] & 0xd0) == 0x40; +} + +/* disallow addresses which may be confused with IP headers */ +static int qmi_wwan_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + + ret = eth_prepare_mac_addr_change(dev, p); + if (ret < 0) + return ret; + if (possibly_iphdr(addr->sa_data)) + return -EADDRNOTAVAIL; + eth_commit_mac_addr_change(dev, p); + return 0; +} + +static const struct net_device_ops qmi_wwan_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = usbnet_change_mtu, + .ndo_set_mac_address = qmi_wwan_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* using a counter to merge subdriver requests with our own into a combined state */ static int qmi_wwan_manage_power(struct usbnet *dev, int on) { @@ -229,6 +320,18 @@ next_desc: usb_driver_release_interface(driver, info->data); } + /* Never use the same address on both ends of the link, even + * if the buggy firmware told us to. 
+ */ + if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr)) + eth_hw_addr_random(dev->net); + + /* make MAC addr easily distinguishable from an IP header */ + if (possibly_iphdr(dev->net->dev_addr)) { + dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */ + dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */ + } + dev->net->netdev_ops = &qmi_wwan_netdev_ops; err: return status; } @@ -307,6 +410,7 @@ static const struct driver_info qmi_wwan_info = { .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, + .rx_fixup = qmi_wwan_rx_fixup, }; #define HUAWEI_VENDOR_ID 0x12D1 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 9abe51710f22..1a15ec14c386 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); + int ret; + + if (new_mtu > MAX_SINGLE_PACKET_SIZE) + return -EINVAL; - int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); + ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac rx frame length\n"); return ret; @@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); - ret = smsc75xx_set_rx_max_frame_length(dev, 1514); + ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set max rx frame length\n"); return ret; @@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { - /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ - if (unlikely(size > (ETH_FRAME_LEN + 12))) { + /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ + if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 28fd99203f64..bdee2ed67219 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = { {0x00008258, 0x00000000}, {0x0000825c, 0x40000000}, {0x00008260, 0x00080922}, - {0x00008264, 0x9bc00010}, + {0x00008264, 0x9d400010}, {0x00008268, 0xffffffff}, {0x0000826c, 0x0000ffff}, {0x00008270, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c index 467b60014b7b..73fe8d6db566 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c @@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq) u32 sz, i; struct channel_detector *cd; - cd = kmalloc(sizeof(*cd), GFP_KERNEL); + cd = kmalloc(sizeof(*cd), GFP_ATOMIC); if (cd == NULL) goto fail; INIT_LIST_HEAD(&cd->head); cd->freq = freq; sz = sizeof(cd->detectors) * dpd->num_radar_types; - cd->detectors = kzalloc(sz, GFP_KERNEL); + cd->detectors = kzalloc(sz, GFP_ATOMIC); if (cd->detectors == NULL) goto fail; diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c 
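
Both DFS files in this series (dfs_pattern_detector.c above and dfs_pri_detector.c below) switch their allocations from GFP_KERNEL to GFP_ATOMIC, presumably because these helpers can be reached from the radar-pulse processing path where sleeping is not allowed. A small kernel-style sketch of that pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical element type, mirroring the pulse_elem allocations above. */
struct demo_pulse {
	struct list_head head;
	u64 ts;
};

static struct demo_pulse *demo_pulse_alloc(u64 ts)
{
	/* GFP_ATOMIC: this may run in softirq context or under a
	 * spinlock, so a sleeping GFP_KERNEL allocation is not safe. */
	struct demo_pulse *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (!p)
		return NULL;	/* caller accounts this as an alloc error */

	INIT_LIST_HEAD(&p->head);
	p->ts = ts;
	return p;
}
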
index 91b8dceeadb1..5e48c5515b8c 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c @@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts) { struct pulse_elem *p = pool_get_pulse_elem(); if (p == NULL) { - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kmalloc(sizeof(*p), GFP_ATOMIC); if (p == NULL) { DFS_POOL_STAT_INC(pulse_alloc_error); return false; @@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde, ps.deadline_ts = ps.first_ts + ps.dur; new_ps = pool_get_pseq_elem(); if (new_ps == NULL) { - new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL); + new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC); if (new_ps == NULL) { DFS_POOL_STAT_INC(pseq_alloc_error); return false; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 716058b67557..a47f5e05fc04 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) * required version. */ if (priv->fw_version_major != MAJOR_VERSION_REQ || - priv->fw_version_minor != MINOR_VERSION_REQ) { + priv->fw_version_minor < MINOR_VERSION_REQ) { dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", MAJOR_VERSION_REQ, MINOR_VERSION_REQ); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 39c84ecf6a42..7fdac6c7b3ea 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data) { struct ath_softc *sc = (struct ath_softc *)data; - ieee80211_queue_work(sc->hw, &sc->hw_check_work); + if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) + ieee80211_queue_work(sc->hw, &sc->hw_check_work); } /* diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6e66f9c6782b..988372d218a4 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) if (r) { ath_err(common, "Unable to reset channel, reset status %d\n", r); + + ath9k_hw_enable_interrupts(ah); + ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); + goto out; } diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 38bc5a7997ff..122146943bf2 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_dma_ops *ops; struct b43_dmaring *ring; struct b43_dmadesc_meta *meta; + static const struct b43_txstatus fake; /* filled with 0 */ + const struct b43_txstatus *txstat; int slot, firstused; bool frame_succeed; + int skip; + static u8 err_out1, err_out2; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) @@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, firstused = ring->current_slot - ring->used_slots + 1; if (firstused < 0) firstused = ring->nr_slots + firstused; + + skip = 0; if (unlikely(slot != firstused)) { /* This possibly is a firmware bug and will result in - * malfunction, memory leaks and/or stall of DMA functionality. */ - b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. 
" - "Expected %d, but got %d\n", - ring->index, firstused, slot); - return; + * malfunction, memory leaks and/or stall of DMA functionality. + */ + if (slot == next_slot(ring, next_slot(ring, firstused))) { + /* If a single header/data pair was missed, skip over + * the first two slots in an attempt to recover. + */ + slot = firstused; + skip = 2; + if (!err_out1) { + /* Report the error once. */ + b43dbg(dev->wl, + "Skip on DMA ring %d slot %d.\n", + ring->index, slot); + err_out1 = 1; + } + } else { + /* More than a single header/data pair were missed. + * Report this error once. + */ + if (!err_out2) + b43dbg(dev->wl, + "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", + ring->index, firstused, slot); + err_out2 = 1; + return; + } } ops = ring->ops; @@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, slot, firstused, ring->index); break; } + if (meta->skb) { struct b43_private_tx_info *priv_info = - b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); + b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); - unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; } else { @@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, struct ieee80211_tx_info *info; if (unlikely(!meta->skb)) { - /* This is a scatter-gather fragment of a frame, so - * the skb pointer must not be NULL. */ + /* This is a scatter-gather fragment of a frame, + * so the skb pointer must not be NULL. + */ b43dbg(dev->wl, "TX status unexpected NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); @@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* * Call back to inform the ieee80211 subsystem about - * the status of the transmission. + * the status of the transmission. When skipping over + * a missed TX status report, use a status structure + * filled with zeros to indicate that the frame was not + * sent (frame_count 0) and not acknowledged */ - frame_succeed = b43_fill_txstatus_report(dev, info, status); + if (unlikely(skip)) + txstat = &fake; + else + txstat = status; + + frame_succeed = b43_fill_txstatus_report(dev, info, + txstat); #ifdef CONFIG_B43_DEBUG if (frame_succeed) ring->nr_succeed_tx_packets++; @@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* Everything unmapped and free'd. So it's not used anymore. */ ring->used_slots--; - if (meta->is_last_fragment) { + if (meta->is_last_fragment && !skip) { /* This is the last scatter-gather * fragment of the frame. We are done. 
*/ break; } slot = next_slot(ring, slot); + if (skip > 0) + --skip; } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 3c35382ee6c2..b70f220bc4b3 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) u16 clip_off[2] = { 0xFFFF, 0xFFFF }; u8 vcm_final = 0; - s8 offset[4]; + s32 offset[4]; s32 results[8][4] = { }; s32 results_min[4] = { }; s32 poll_results[4] = { }; @@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) } for (i = 0; i < 4; i += 2) { s32 curr; - s32 mind = 40; + s32 mind = 0x100000; s32 minpoll = 249; u8 minvcm = 0; if (2 * core != i) @@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) u8 regs_save_radio[2]; u16 regs_save_phy[2]; - s8 offset[4]; + s32 offset[4]; u8 core; u8 rail; @@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) } for (i = 0; i < 4; i++) { - s32 mind = 40; + s32 mind = 0x100000; u8 minvcm = 0; s32 minpoll = 249; s32 curr; @@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: - /* FIXME */ + ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, + avoid); break; #endif } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 4469321c0eb3..35fc68be158d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) goto err; } - /* External image takes precedence if specified */ if (brcmf_sdbrcm_download_code_file(bus)) { brcmf_err("dongle image file download failed\n"); goto err; } - /* External nvram takes precedence if specified */ - if (brcmf_sdbrcm_download_nvram(bus)) + if (brcmf_sdbrcm_download_nvram(bus)) { brcmf_err("dongle nvram file download failed\n"); + goto err; + } /* Take arm out of reset */ if (brcmf_sdbrcm_download_state(bus, false)) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 2af9c0f0798d..78da3eff75e8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -1891,8 +1891,10 @@ static s32 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, const u8 *mac_addr, struct key_params *params) { + struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_wsec_key key; s32 err = 0; + u8 keybuf[8]; memset(&key, 0, sizeof(key)); key.index = (u32) key_idx; @@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, brcmf_dbg(CONN, "Setting the key index %d\n", key.index); memcpy(key.data, params->key, key.len); - if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { - u8 keybuf[8]; + if ((ifp->vif->mode != WL_MODE_AP) && + (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { + brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); memcpy(keybuf, &key.data[24], sizeof(keybuf)); memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); memcpy(&key.data[16], keybuf, sizeof(keybuf)); @@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, break; case WLAN_CIPHER_SUITE_TKIP: if (ifp->vif->mode != WL_MODE_AP) { - brcmf_dbg(CONN, 
"Swapping key\n"); + brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); memcpy(keybuf, &key.data[24], sizeof(keybuf)); memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); memcpy(&key.data[16], keybuf, sizeof(keybuf)); @@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, err = -EAGAIN; goto done; } - switch (wsec & ~SES_OW_ENABLED) { - case WEP_ENABLED: + if (wsec & WEP_ENABLED) { sec = &profile->sec; if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { params.cipher = WLAN_CIPHER_SUITE_WEP40; @@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, params.cipher = WLAN_CIPHER_SUITE_WEP104; brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); } - break; - case TKIP_ENABLED: + } else if (wsec & TKIP_ENABLED) { params.cipher = WLAN_CIPHER_SUITE_TKIP; brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); - break; - case AES_ENABLED: + } else if (wsec & AES_ENABLED) { params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); - break; - default: + } else { brcmf_err("Invalid algo (0x%x)\n", wsec); err = -EINVAL; goto done; @@ -3824,8 +3823,9 @@ exit: static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); - s32 err = -EPERM; + s32 err; struct brcmf_fil_bss_enable_le bss_enable; + struct brcmf_join_params join_params; brcmf_dbg(TRACE, "Enter\n"); @@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) /* Due to most likely deauths outstanding we sleep */ /* first to make sure they get processed by fw. */ msleep(400); - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); - if (err < 0) { - brcmf_err("setting AP mode failed %d\n", err); - goto exit; - } + + memset(&join_params, 0, sizeof(join_params)); + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, + &join_params, sizeof(join_params)); + if (err < 0) + brcmf_err("SET SSID error (%d)\n", err); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); - if (err < 0) { + if (err < 0) brcmf_err("BRCMF_C_UP error %d\n", err); - goto exit; - } + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); + if (err < 0) + brcmf_err("setting AP mode failed %d\n", err); + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); + if (err < 0) + brcmf_err("setting INFRA mode failed %d\n", err); } else { bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); bss_enable.enable = cpu_to_le32(0); @@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); -exit: return err; } @@ -4124,10 +4128,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = { }, { .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE) - }, - { - .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, @@ -4183,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_DEVICE); + BIT(NL80211_IFTYPE_P2P_GO); wiphy->iface_combinations = brcmf_iface_combos; wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index c6451c61407a..e2340b231aa1 100644 --- 
a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br) } } +/** + * This function frees the WL per-device resources. + * + * This function frees resources owned by the WL device pointed to + * by the wl parameter. + * + * precondition: can both be called locked and unlocked + * + */ +static void brcms_free(struct brcms_info *wl) +{ + struct brcms_timer *t, *next; + + /* free ucode data */ + if (wl->fw.fw_cnt) + brcms_ucode_data_free(&wl->ucode); + if (wl->irq) + free_irq(wl->irq, wl); + + /* kill dpc */ + tasklet_kill(&wl->tasklet); + + if (wl->pub) { + brcms_debugfs_detach(wl->pub); + brcms_c_module_unregister(wl->pub, "linux", wl); + } + + /* free common resources */ + if (wl->wlc) { + brcms_c_detach(wl->wlc); + wl->wlc = NULL; + wl->pub = NULL; + } + + /* virtual interface deletion is deferred so we cannot spinwait */ + + /* wait for all pending callbacks to complete */ + while (atomic_read(&wl->callbacks) > 0) + schedule(); + + /* free timers */ + for (t = wl->timers; t; t = next) { + next = t->next; +#ifdef DEBUG + kfree(t->name); +#endif + kfree(t); + } +} + +/* +* called from both kernel as from this kernel module (error flow on attach) +* precondition: perimeter lock is not acquired. +*/ +static void brcms_remove(struct bcma_device *pdev) +{ + struct ieee80211_hw *hw = bcma_get_drvdata(pdev); + struct brcms_info *wl = hw->priv; + + if (wl->wlc) { + wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); + wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); + ieee80211_unregister_hw(hw); + } + + brcms_free(wl); + + bcma_set_drvdata(pdev, NULL); + ieee80211_free_hw(hw); +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. + */ +static void brcms_release_fw(struct brcms_info *wl) +{ + int i; + for (i = 0; i < MAX_FW_IMAGES; i++) { + release_firmware(wl->fw.fw_bin[i]); + release_firmware(wl->fw.fw_hdr[i]); + } +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. 
+ */ +static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) +{ + int status; + struct device *device = &pdev->dev; + char fw_name[100]; + int i; + + memset(&wl->fw, 0, sizeof(struct brcms_firmware)); + for (i = 0; i < MAX_FW_IMAGES; i++) { + if (brcms_firmwares[i] == NULL) + break; + sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], + UCODE_LOADER_API_VER); + status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); + if (status) { + wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", + KBUILD_MODNAME, fw_name); + return status; + } + sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], + UCODE_LOADER_API_VER); + status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); + if (status) { + wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", + KBUILD_MODNAME, fw_name); + return status; + } + wl->fw.hdr_num_entries[i] = + wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); + } + wl->fw.fw_cnt = i; + status = brcms_ucode_data_init(wl, &wl->ucode); + brcms_release_fw(wl); + return status; +} + static void brcms_ops_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw) if (!blocked) wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); + if (!wl->ucode.bcm43xx_bomminor) { + err = brcms_request_fw(wl, wl->wlc->hw->d11core); + if (err) { + brcms_remove(wl->wlc->hw->d11core); + return -ENOENT; + } + } + spin_lock_bh(&wl->lock); /* avoid acknowledging frames before a non-monitor device is added */ wl->mute_tx = true; @@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data) wake_up(&wl->tx_flush_wq); } -/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. - */ -static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) -{ - int status; - struct device *device = &pdev->dev; - char fw_name[100]; - int i; - - memset(&wl->fw, 0, sizeof(struct brcms_firmware)); - for (i = 0; i < MAX_FW_IMAGES; i++) { - if (brcms_firmwares[i] == NULL) - break; - sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], - UCODE_LOADER_API_VER); - status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); - if (status) { - wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", - KBUILD_MODNAME, fw_name); - return status; - } - sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], - UCODE_LOADER_API_VER); - status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); - if (status) { - wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", - KBUILD_MODNAME, fw_name); - return status; - } - wl->fw.hdr_num_entries[i] = - wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); - } - wl->fw.fw_cnt = i; - return brcms_ucode_data_init(wl, &wl->ucode); -} - -/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. - */ -static void brcms_release_fw(struct brcms_info *wl) -{ - int i; - for (i = 0; i < MAX_FW_IMAGES; i++) { - release_firmware(wl->fw.fw_bin[i]); - release_firmware(wl->fw.fw_hdr[i]); - } -} - -/** - * This function frees the WL per-device resources. - * - * This function frees resources owned by the WL device pointed to - * by the wl parameter. 
- * - * precondition: can both be called locked and unlocked - * - */ -static void brcms_free(struct brcms_info *wl) -{ - struct brcms_timer *t, *next; - - /* free ucode data */ - if (wl->fw.fw_cnt) - brcms_ucode_data_free(&wl->ucode); - if (wl->irq) - free_irq(wl->irq, wl); - - /* kill dpc */ - tasklet_kill(&wl->tasklet); - - if (wl->pub) { - brcms_debugfs_detach(wl->pub); - brcms_c_module_unregister(wl->pub, "linux", wl); - } - - /* free common resources */ - if (wl->wlc) { - brcms_c_detach(wl->wlc); - wl->wlc = NULL; - wl->pub = NULL; - } - - /* virtual interface deletion is deferred so we cannot spinwait */ - - /* wait for all pending callbacks to complete */ - while (atomic_read(&wl->callbacks) > 0) - schedule(); - - /* free timers */ - for (t = wl->timers; t; t = next) { - next = t->next; -#ifdef DEBUG - kfree(t->name); -#endif - kfree(t); - } -} - -/* -* called from both kernel as from this kernel module (error flow on attach) -* precondition: perimeter lock is not acquired. -*/ -static void brcms_remove(struct bcma_device *pdev) -{ - struct ieee80211_hw *hw = bcma_get_drvdata(pdev); - struct brcms_info *wl = hw->priv; - - if (wl->wlc) { - wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); - wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); - ieee80211_unregister_hw(hw); - } - - brcms_free(wl); - - bcma_set_drvdata(pdev, NULL); - ieee80211_free_hw(hw); -} - static irqreturn_t brcms_isr(int irq, void *dev_id) { struct brcms_info *wl; @@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) spin_lock_init(&wl->lock); spin_lock_init(&wl->isr_lock); - /* prepare ucode */ - if (brcms_request_fw(wl, pdev) < 0) { - wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " - "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); - brcms_release_fw(wl); - brcms_remove(pdev); - return NULL; - } - /* common load-time initialization */ wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); - brcms_release_fw(wl); if (!wl->wlc) { wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", KBUILD_MODNAME, err); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 21a824232478..18d37645e2cd 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, gain0_15 = ((biq1 & 0xf) << 12) | ((tia & 0xf) << 8) | ((lna2 & 0x3) << 6) | - ((lna2 & 0x3) << 4) | - ((lna1 & 0x3) << 2) | - ((lna1 & 0x3) << 0); + ((lna2 & + 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); @@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, } mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); - mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); - mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); } @@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples) return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; } -static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, - u16 tia_gain, u16 lna2_gain) -{ - u32 i_thresh_l, q_thresh_l; - u32 i_thresh_h, q_thresh_h; - struct lcnphy_iq_est iq_est_h, iq_est_l; - - wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, - lna2_gain, 0); - - wlc_lcnphy_rx_gain_override_enable(pi, true); - wlc_lcnphy_start_tx_tone(pi, 
2000, (40 >> 1), 0); - udelay(500); - write_radio_reg(pi, RADIO_2064_REG112, 0); - if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) - return false; - - wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); - udelay(500); - write_radio_reg(pi, RADIO_2064_REG112, 0); - if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) - return false; - - i_thresh_l = (iq_est_l.i_pwr << 1); - i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; - - q_thresh_l = (iq_est_l.q_pwr << 1); - q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; - if ((iq_est_h.i_pwr > i_thresh_l) && - (iq_est_h.i_pwr < i_thresh_h) && - (iq_est_h.q_pwr > q_thresh_l) && - (iq_est_h.q_pwr < q_thresh_h)) - return true; - - return false; -} - static bool wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, const struct lcnphy_rx_iqcomp *iqcomp, @@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, rfoverride3_old, rfoverride3val_old, rfoverride4_old, rfoverride4val_old, afectrlovr_old, afectrlovrval_old; - int tia_gain, lna2_gain, biq1_gain; - bool set_gain; + int tia_gain; + u32 received_power, rx_pwr_threshold; u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; u16 values_to_save[11]; s16 *ptr; @@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, goto cal_done; } - WARN_ON(module != 1); - tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); - wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); - - for (i = 0; i < 11; i++) - values_to_save[i] = - read_radio_reg(pi, rxiq_cal_rf_reg[i]); - Core1TxControl_old = read_phy_reg(pi, 0x631); - - or_phy_reg(pi, 0x631, 0x0015); - - RFOverride0_old = read_phy_reg(pi, 0x44c); - RFOverrideVal0_old = read_phy_reg(pi, 0x44d); - rfoverride2_old = read_phy_reg(pi, 0x4b0); - rfoverride2val_old = read_phy_reg(pi, 0x4b1); - rfoverride3_old = read_phy_reg(pi, 0x4f9); - rfoverride3val_old = read_phy_reg(pi, 0x4fa); - rfoverride4_old = read_phy_reg(pi, 0x938); - rfoverride4val_old = read_phy_reg(pi, 0x939); - afectrlovr_old = read_phy_reg(pi, 0x43b); - afectrlovrval_old = read_phy_reg(pi, 0x43c); - old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); - old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); - - tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); - if (tx_gain_override_old) { - wlc_lcnphy_get_tx_gain(pi, &old_gains); - tx_gain_index_old = pi_lcn->lcnphy_current_index; - } - - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); + if (module == 1) { - mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); + tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); + wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); - mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); + for (i = 0; i < 11; i++) + values_to_save[i] = + read_radio_reg(pi, rxiq_cal_rf_reg[i]); + Core1TxControl_old = read_phy_reg(pi, 0x631); + + or_phy_reg(pi, 0x631, 0x0015); + + RFOverride0_old = read_phy_reg(pi, 0x44c); + RFOverrideVal0_old = read_phy_reg(pi, 0x44d); + rfoverride2_old = read_phy_reg(pi, 0x4b0); + rfoverride2val_old = read_phy_reg(pi, 0x4b1); + rfoverride3_old = read_phy_reg(pi, 0x4f9); + rfoverride3val_old = read_phy_reg(pi, 0x4fa); + rfoverride4_old = read_phy_reg(pi, 0x938); + rfoverride4val_old = read_phy_reg(pi, 0x939); + afectrlovr_old = read_phy_reg(pi, 0x43b); + afectrlovrval_old = read_phy_reg(pi, 0x43c); + old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); + old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); + + tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); 
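/*
 * Sketch only (hypothetical helpers, not driver code): the rewritten
 * calibration later in this hunk drops the nested biq1/tia/lna2 gain
 * search and instead walks tia_gain down from 8 until the digital power
 * measured over 2000 samples falls below a threshold of 950.  The shape
 * of that loop:
 */
static int demo_pick_tia_gain(unsigned int (*measure_power)(unsigned int nsamples),
			      void (*set_rx_gain)(int tia_gain))
{
	int tia_gain = 8;
	const unsigned int rx_pwr_threshold = 950;

	while (tia_gain > 0) {
		tia_gain -= 1;
		set_rx_gain(tia_gain);	/* stand-in for the rx-gain override */
		/* let the tone settle, then check whether it still saturates */
		if (measure_power(2000) < rx_pwr_threshold)
			break;
	}
	return tia_gain;
}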
+ if (tx_gain_override_old) { + wlc_lcnphy_get_tx_gain(pi, &old_gains); + tx_gain_index_old = pi_lcn->lcnphy_current_index; + } - write_radio_reg(pi, RADIO_2064_REG116, 0x06); - write_radio_reg(pi, RADIO_2064_REG12C, 0x07); - write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); - write_radio_reg(pi, RADIO_2064_REG098, 0x03); - write_radio_reg(pi, RADIO_2064_REG00B, 0x7); - mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); - write_radio_reg(pi, RADIO_2064_REG01D, 0x01); - write_radio_reg(pi, RADIO_2064_REG114, 0x01); - write_radio_reg(pi, RADIO_2064_REG02E, 0x10); - write_radio_reg(pi, RADIO_2064_REG12A, 0x08); - - mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); - mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); - mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); - mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); - write_phy_reg(pi, 0x6da, 0xffff); - or_phy_reg(pi, 0x6db, 0x3); + mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); - wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); - set_gain = false; - - lna2_gain = 3; - while ((lna2_gain >= 0) && !set_gain) { - tia_gain = 4; - - while ((tia_gain >= 0) && !set_gain) { - biq1_gain = 6; - - while ((biq1_gain >= 0) && !set_gain) { - set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, - (u16) - biq1_gain, - (u16) - tia_gain, - (u16) - lna2_gain); - biq1_gain -= 1; - } + write_radio_reg(pi, RADIO_2064_REG116, 0x06); + write_radio_reg(pi, RADIO_2064_REG12C, 0x07); + write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); + write_radio_reg(pi, RADIO_2064_REG098, 0x03); + write_radio_reg(pi, RADIO_2064_REG00B, 0x7); + mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); + write_radio_reg(pi, RADIO_2064_REG01D, 0x01); + write_radio_reg(pi, RADIO_2064_REG114, 0x01); + write_radio_reg(pi, RADIO_2064_REG02E, 0x10); + write_radio_reg(pi, RADIO_2064_REG12A, 0x08); + + mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); + mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); + + mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); + + wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); + write_phy_reg(pi, 0x6da, 0xffff); + or_phy_reg(pi, 0x6db, 0x3); + wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); + wlc_lcnphy_rx_gain_override_enable(pi, true); + + tia_gain = 8; + rx_pwr_threshold = 950; + while (tia_gain > 0) { tia_gain -= 1; + wlc_lcnphy_set_rx_gain_by_distribution(pi, + 0, 0, 2, 2, + (u16) + tia_gain, 1, 0); + udelay(500); + + received_power = + wlc_lcnphy_measure_digital_power(pi, 2000); + if (received_power < rx_pwr_threshold) + break; } - lna2_gain -= 1; - } + result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); - if (set_gain) - result = wlc_lcnphy_calc_rx_iq_comp(pi, 
1024); - else - result = false; + wlc_lcnphy_stop_tx_tone(pi); - wlc_lcnphy_stop_tx_tone(pi); + write_phy_reg(pi, 0x631, Core1TxControl_old); - write_phy_reg(pi, 0x631, Core1TxControl_old); - - write_phy_reg(pi, 0x44c, RFOverrideVal0_old); - write_phy_reg(pi, 0x44d, RFOverrideVal0_old); - write_phy_reg(pi, 0x4b0, rfoverride2_old); - write_phy_reg(pi, 0x4b1, rfoverride2val_old); - write_phy_reg(pi, 0x4f9, rfoverride3_old); - write_phy_reg(pi, 0x4fa, rfoverride3val_old); - write_phy_reg(pi, 0x938, rfoverride4_old); - write_phy_reg(pi, 0x939, rfoverride4val_old); - write_phy_reg(pi, 0x43b, afectrlovr_old); - write_phy_reg(pi, 0x43c, afectrlovrval_old); - write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); - write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); + write_phy_reg(pi, 0x44c, RFOverrideVal0_old); + write_phy_reg(pi, 0x44d, RFOverrideVal0_old); + write_phy_reg(pi, 0x4b0, rfoverride2_old); + write_phy_reg(pi, 0x4b1, rfoverride2val_old); + write_phy_reg(pi, 0x4f9, rfoverride3_old); + write_phy_reg(pi, 0x4fa, rfoverride3val_old); + write_phy_reg(pi, 0x938, rfoverride4_old); + write_phy_reg(pi, 0x939, rfoverride4val_old); + write_phy_reg(pi, 0x43b, afectrlovr_old); + write_phy_reg(pi, 0x43c, afectrlovrval_old); + write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); + write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); - wlc_lcnphy_clear_trsw_override(pi); + wlc_lcnphy_clear_trsw_override(pi); - mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); + mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); - for (i = 0; i < 11; i++) - write_radio_reg(pi, rxiq_cal_rf_reg[i], - values_to_save[i]); + for (i = 0; i < 11; i++) + write_radio_reg(pi, rxiq_cal_rf_reg[i], + values_to_save[i]); - if (tx_gain_override_old) - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); - else - wlc_lcnphy_disable_tx_gain_override(pi); + if (tx_gain_override_old) + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); + else + wlc_lcnphy_disable_tx_gain_override(pi); - wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); - wlc_lcnphy_rx_gain_override_enable(pi, false); + wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); + wlc_lcnphy_rx_gain_override_enable(pi, false); + } cal_done: kfree(ptr); @@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel) write_radio_reg(pi, RADIO_2064_REG038, 3); write_radio_reg(pi, RADIO_2064_REG091, 7); } - - if (!(pi->sh->boardflags & BFL_FEM)) { - u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, - 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; - - write_radio_reg(pi, RADIO_2064_REG02A, 0xf); - write_radio_reg(pi, RADIO_2064_REG091, 0x3); - write_radio_reg(pi, RADIO_2064_REG038, 0x3); - - write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); - } } static int @@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos) } else { mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); - mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); - mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); - mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); - mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); - mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); - mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); - mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); - mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); - mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); - mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); } } else { mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); @@ -2130,14 +2061,12 @@ static void 
wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi) (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); - mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); } static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) { struct phytbl_info tab; u32 rfseq, ind; - u8 tssi_sel; tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; @@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); - if (pi->sh->boardflags & BFL_FEM) { - tssi_sel = 0x1; - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); - } else { - tssi_sel = 0xe; - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); - } + wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); @@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { - mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); + mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); } else { - mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); } @@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); - mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); - mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); - mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); - wlc_lcnphy_pwrctrl_rssiparams(pi); } @@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) read_radio_reg(pi, RADIO_2064_REG007) & 1; u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; - u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); - idleTssi = read_phy_reg(pi, 0x4ab); suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)); @@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); wlc_lcnphy_tssi_setup(pi); - - mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); - mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); - - wlc_lcnphy_set_bbmult(pi, 0x0); - wlc_phy_do_dummy_tx(pi, true, OFF); idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) >> 0); @@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); - wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); wlc_lcnphy_set_tx_gain(pi, &old_gains); wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); @@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) wlc_lcnphy_write_table(pi, &tab); tab.tbl_offset++; } - mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); - mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); - mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); - mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); - mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); @@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) target_gains.pad_gain = 21; target_gains.dac_gain = 0; wlc_lcnphy_set_tx_gain(pi, &target_gains); + wlc_lcnphy_set_tx_pwr_by_index(pi, 16); if (LCNREV_IS(pi->pubpi.phy_rev, 
1) || pi_lcn->lcnphy_hw_iqcal_en) { @@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) lcnphy_recal ? LCNPHY_CAL_RECAL : LCNPHY_CAL_FULL), false); } else { - wlc_lcnphy_set_tx_pwr_by_index(pi, 16); wlc_lcnphy_tx_iqlo_soft_cal_full(pi); } @@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, if (CHSPEC_IS5G(pi->radio_chanspec)) pa_gain = 0x70; else - pa_gain = 0x60; + pa_gain = 0x70; if (pi->sh->boardflags & BFL_FEM) pa_gain = 0x10; - tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; tab.tbl_len = 1; tab.tbl_ptr = &val; for (j = 0; j < 128; j++) { - if (pi->sh->boardflags & BFL_FEM) - gm_gain = gain_table[j].gm; - else - gm_gain = 15; - + gm_gain = gain_table[j].gm; val = (((u32) pa_gain << 24) | (gain_table[j].pad << 16) | (gain_table[j].pga << 8) | gm_gain); @@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) write_phy_reg(pi, 0x4ea, 0x4688); - if (pi->sh->boardflags & BFL_FEM) - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); - else - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); + mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); @@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) wlc_lcnphy_rcal(pi); wlc_lcnphy_rc_cal(pi); - - if (!(pi->sh->boardflags & BFL_FEM)) { - write_radio_reg(pi, RADIO_2064_REG032, 0x6f); - write_radio_reg(pi, RADIO_2064_REG033, 0x19); - write_radio_reg(pi, RADIO_2064_REG039, 0xe); - } - } static void wlc_lcnphy_radio_init(struct brcms_phy *pi) @@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) wlc_lcnphy_write_table(pi, &tab); } - if (!(pi->sh->boardflags & BFL_FEM)) { - tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; - tab.tbl_width = 16; - tab.tbl_ptr = &val; - tab.tbl_len = 1; + tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; + tab.tbl_width = 16; + tab.tbl_ptr = &val; + tab.tbl_len = 1; - val = 150; - tab.tbl_offset = 0; - wlc_lcnphy_write_table(pi, &tab); + val = 114; + tab.tbl_offset = 0; + wlc_lcnphy_write_table(pi, &tab); - val = 220; - tab.tbl_offset = 1; - wlc_lcnphy_write_table(pi, &tab); - } + val = 130; + tab.tbl_offset = 1; + wlc_lcnphy_write_table(pi, &tab); + + val = 6; + tab.tbl_offset = 8; + wlc_lcnphy_write_table(pi, &tab); if (CHSPEC_IS2G(pi->radio_chanspec)) { if (pi->sh->boardflags & BFL_FEM) @@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec) wlc_lcnphy_load_tx_iir_filter(pi, true, 3); mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); - wlc_lcnphy_tssi_setup(pi); } void wlc_phy_detach_lcnphy(struct brcms_phy *pi) @@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) return false; - if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { + if ((pi->sh->boardflags & BFL_FEM) && + (LCNREV_IS(pi->pubpi.phy_rev, 1))) { if (pi_lcn->lcnphy_tempsense_option == 3) { pi->hwpwrctrl = true; pi->hwpwrctrl_capable = true; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index b7e95acc2084..622c01ca72c5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c @@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = { }; static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 0x0009, - 0x000a, 0x0005, - 0x0006, - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 
0x0009, - 0x000a, 0x0005, - 0x0006, - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 0x0009, - 0x000a, 0x0005, - 0x0006, - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 0x0009, - 0x000a, 0x0005, - 0x0006, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, }; static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index e8324b5e5bfe..6c7493c2d698 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, int rate_idx; int i; u32 rate; - u8 use_green = il4965_rs_use_green(il, sta); + u8 use_green; u8 active_tbl = 0; u8 valid_tx_ant; struct il_station_priv *sta_priv; @@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, if (!sta || !lq_sta) return; + use_green = il4965_rs_use_green(il, sta); sta_priv = (void *)sta->drv_priv; i = lq_sta->last_txrate_idx; diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 86ea5f4c3939..44ca0e57f9f7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1262,6 +1262,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) } /* + * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag + * in iwl_down but cancel the workers only later. + */ + if (!priv->ucode_loaded) { + IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); + return -EIO; + } + + /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 23be948cf162..a82b6b39d4ff 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, mutex_lock(&priv->mutex); + if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { + /* + * If we go idle, then clearly no "passive-no-rx" + * workaround is needed any more, this is a reset. + */ + iwlagn_lift_passive_no_rx(priv); + } + if (unlikely(!iwl_is_ready(priv))) { IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); mutex_unlock(&priv->mutex); @@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, priv->timestamp = bss_conf->sync_tsf; ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; } else { - /* - * If we disassociate while there are pending - * frames, just wake up the queues and let the - * frames "escape" ... This shouldn't really - * be happening to start with, but we should - * not get stuck in this case either since it - * can happen if userspace gets confused. 
- */ - iwlagn_lift_passive_no_rx(priv); - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; if (ctx->ctxid == IWL_RXON_CTX_BSS) diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 6aec2df3bb27..d1a670d7b10c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, memset(&info->status, 0, sizeof(info->status)); if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && - iwl_is_associated_ctx(ctx) && ctx->vif && + ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) { /* block and stop all queues */ priv->passive_no_rx = true; diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 736fe9bb140e..1a4ac9236a44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return -EIO; } + priv->ucode_loaded = true; + if (ucode_type != IWL_UCODE_WOWLAN) { /* delay a bit to give rfkill time to run */ msleep(5); @@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return ret; } - priv->ucode_loaded = true; - return 0; } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 17bedc50e753..12c4f31ca8fb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans_pcie->status); + else + clear_bit(STATUS_RFKILL, &trans_pcie->status); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); if (hw_rfkill && !run_in_rfkill) return -ERFKILL; @@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int err; @@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans_pcie->status); + else + clear_bit(STATUS_RFKILL, &trans_pcie->status); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); return 0; @@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, * op_mode. 
*/ hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans_pcie->status); + else + clear_bit(STATUS_RFKILL, &trans_pcie->status); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); } } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8595c16f74de..cb5c6792e3a8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy = 0; - if (!cmd->len) + if (!cmd->len[i]) continue; /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index a44023a7bd57..8aaf56ade4d9 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, } } - for (i = 0; i < request->n_channels; i++) { + for (i = 0; i < min_t(u32, request->n_channels, + MWIFIEX_USER_SCAN_CHAN_MAX); i++) { chan = request->channels[i]; priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; priv->user_scan_cfg->chan_list[i].radio_type = chan->band; diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5c395e2e6a2b..feb204613397 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) } memcpy(adapter->upld_buf, skb->data, min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); + skb_push(skb, INTF_HEADER_LEN); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, PCI_DMA_FROMDEVICE)) return -1; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index d215b4d3c51b..e7f6deaf715e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, queue_work(adapter->workqueue, &adapter->main_work); /* Perform internal scan synchronously */ - if (!priv->scan_request) + if (!priv->scan_request) { + dev_dbg(adapter->dev, "wait internal scan\n"); mwifiex_wait_queue_complete(adapter, cmd_node); + } } else { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); @@ -1793,7 +1795,12 @@ check_next_scan: /* Need to indicate IOCTL complete */ if (adapter->curr_cmd->wait_q_enabled) { adapter->cmd_wait_q.status = 0; - mwifiex_complete_cmd(adapter, adapter->curr_cmd); + if (!priv->scan_request) { + dev_dbg(adapter->dev, + "complete internal scan\n"); + mwifiex_complete_cmd(adapter, + adapter->curr_cmd); + } } if (priv->report_scan_result) priv->report_scan_result = false; diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 2bf4efa33186..76cd47eb901e 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -20,6 +20,7 @@ if RT2X00 config RT2400PCI tristate "Ralink rt2400 (PCI/PCMCIA) support" depends on PCI + select RT2X00_LIB_MMIO select RT2X00_LIB_PCI select EEPROM_93CX6 ---help--- @@ -31,6 +32,7 @@ config RT2400PCI config RT2500PCI tristate "Ralink rt2500 (PCI/PCMCIA) support" depends on PCI + select RT2X00_LIB_MMIO select RT2X00_LIB_PCI select EEPROM_93CX6 ---help--- @@ -43,6 +45,7 @@ config RT61PCI tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" depends on PCI select RT2X00_LIB_PCI + select RT2X00_LIB_MMIO select 
RT2X00_LIB_FIRMWARE select RT2X00_LIB_CRYPTO select CRC_ITU_T @@ -57,6 +60,7 @@ config RT2800PCI tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" depends on PCI || SOC_RT288X || SOC_RT305X select RT2800_LIB + select RT2X00_LIB_MMIO select RT2X00_LIB_PCI if PCI select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X select RT2X00_LIB_FIRMWARE @@ -185,6 +189,9 @@ endif config RT2800_LIB tristate +config RT2X00_LIB_MMIO + tristate + config RT2X00_LIB_PCI tristate select RT2X00_LIB diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile index 349d5b8284a4..f069d8bc5b67 100644 --- a/drivers/net/wireless/rt2x00/Makefile +++ b/drivers/net/wireless/rt2x00/Makefile @@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o +obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 221beaaa83f1..dcfb54e0c516 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -34,6 +34,7 @@ #include <linux/slab.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2400pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 39edc59e8d03..e1d2dc9ed28a 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -34,6 +34,7 @@ #include <linux/slab.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2500pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index ded73da4de0b..ba5a05625aaa 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -41,6 +41,7 @@ #include <linux/eeprom_93cx6.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2x00soc.h" #include "rt2800lib.h" diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c new file mode 100644 index 000000000000..d84a680ba0c9 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c @@ -0,0 +1,216 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00mmio + Abstract: rt2x00 generic mmio device routines. + */ + +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "rt2x00.h" +#include "rt2x00mmio.h" + +/* + * Register access. 
+ */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg) +{ + unsigned int i; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return 0; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, offset, reg); + if (!rt2x00_get_field32(*reg, field)) + return 1; + udelay(REGISTER_BUSY_DELAY); + } + + printk_once(KERN_ERR "%s() Indirect register access failed: " + "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); + *reg = ~0; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); + +bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue = rt2x00dev->rx; + struct queue_entry *entry; + struct queue_entry_priv_pci *entry_priv; + struct skb_frame_desc *skbdesc; + int max_rx = 16; + + while (--max_rx) { + entry = rt2x00queue_get_entry(queue, Q_INDEX); + entry_priv = entry->priv_data; + + if (rt2x00dev->ops->lib->get_entry_state(entry)) + break; + + /* + * Fill in desc fields of the skb descriptor + */ + skbdesc = get_skb_frame_desc(entry->skb); + skbdesc->desc = entry_priv->desc; + skbdesc->desc_len = entry->queue->desc_size; + + /* + * DMA is already done, notify rt2x00lib that + * it finished successfully. + */ + rt2x00lib_dmastart(entry); + rt2x00lib_dmadone(entry); + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(entry, GFP_ATOMIC); + } + + return !max_rx; +} +EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); + +void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) +{ + unsigned int i; + + for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) + msleep(10); +} +EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); + +/* + * Device initialization handlers. + */ +static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_pci *entry_priv; + void *addr; + dma_addr_t dma; + unsigned int i; + + /* + * Allocate DMA memory for descriptor and buffer. + */ + addr = dma_alloc_coherent(rt2x00dev->dev, + queue->limit * queue->desc_size, + &dma, GFP_KERNEL); + if (!addr) + return -ENOMEM; + + memset(addr, 0, queue->limit * queue->desc_size); + + /* + * Initialize all queue entries to contain valid addresses. + */ + for (i = 0; i < queue->limit; i++) { + entry_priv = queue->entries[i].priv_data; + entry_priv->desc = addr + i * queue->desc_size; + entry_priv->desc_dma = dma + i * queue->desc_size; + } + + return 0; +} + +static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_pci *entry_priv = + queue->entries[0].priv_data; + + if (entry_priv->desc) + dma_free_coherent(rt2x00dev->dev, + queue->limit * queue->desc_size, + entry_priv->desc, entry_priv->desc_dma); + entry_priv->desc = NULL; +} + +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + int status; + + /* + * Allocate DMA + */ + queue_for_each(rt2x00dev, queue) { + status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); + if (status) + goto exit; + } + + /* + * Register interrupt handler. 
+ */ + status = request_irq(rt2x00dev->irq, + rt2x00dev->ops->lib->irq_handler, + IRQF_SHARED, rt2x00dev->name, rt2x00dev); + if (status) { + ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", + rt2x00dev->irq, status); + goto exit; + } + + return 0; + +exit: + queue_for_each(rt2x00dev, queue) + rt2x00pci_free_queue_dma(rt2x00dev, queue); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00pci_initialize); + +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + + /* + * Free irq line. + */ + free_irq(rt2x00dev->irq, rt2x00dev); + + /* + * Free DMA + */ + queue_for_each(rt2x00dev, queue) + rt2x00pci_free_queue_dma(rt2x00dev, queue); +} +EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); + +/* + * rt2x00mmio module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 mmio library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h new file mode 100644 index 000000000000..4ecaf60175bf --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h @@ -0,0 +1,119 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00mmio + Abstract: Data structures for the rt2x00mmio module. + */ + +#ifndef RT2X00MMIO_H +#define RT2X00MMIO_H + +#include <linux/io.h> + +/* + * Register access. + */ +static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 *value) +{ + *value = readl(rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + memcpy_fromio(value, rt2x00dev->csr.base + offset, length); +} + +static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 value) +{ + writel(value, rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const void *value, + const u32 length) +{ + __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); +} + +/** + * rt2x00pci_regbusy_read - Read from register with busy check + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @field: Field to check if register is busy + * @reg: Pointer to where register contents should be stored + * + * This function will read the given register, and checks if the + * register is busy. If it is, it will sleep for a couple of + * microseconds before reading the register again. If the register + * is not read after a certain timeout, this function will return + * FALSE. 
+ */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg); + +/** + * struct queue_entry_priv_pci: Per entry PCI specific information + * + * @desc: Pointer to device descriptor + * @desc_dma: DMA pointer to &desc. + * @data: Pointer to device's entry memory. + * @data_dma: DMA pointer to &data. + */ +struct queue_entry_priv_pci { + __le32 *desc; + dma_addr_t desc_dma; +}; + +/** + * rt2x00pci_rxdone - Handle RX done events + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * + * Returns true if there are still rx frames pending and false if all + * pending rx frames were processed. + */ +bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00pci_flush_queue - Flush data queue + * @queue: Data queue to stop + * @drop: True to drop all pending frames. + * + * This will wait for a maximum of 100ms, waiting for the queues + * to become empty. + */ +void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); + +/* + * Device initialization handlers. + */ +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); + +#endif /* RT2X00MMIO_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index a0c8caef3b0a..e87865e33113 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -33,182 +33,6 @@ #include "rt2x00pci.h" /* - * Register access. - */ -int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - const struct rt2x00_field32 field, - u32 *reg) -{ - unsigned int i; - - if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - return 0; - - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2x00pci_register_read(rt2x00dev, offset, reg); - if (!rt2x00_get_field32(*reg, field)) - return 1; - udelay(REGISTER_BUSY_DELAY); - } - - ERROR(rt2x00dev, "Indirect register access failed: " - "offset=0x%.08x, value=0x%.08x\n", offset, *reg); - *reg = ~0; - - return 0; -} -EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); - -bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue = rt2x00dev->rx; - struct queue_entry *entry; - struct queue_entry_priv_pci *entry_priv; - struct skb_frame_desc *skbdesc; - int max_rx = 16; - - while (--max_rx) { - entry = rt2x00queue_get_entry(queue, Q_INDEX); - entry_priv = entry->priv_data; - - if (rt2x00dev->ops->lib->get_entry_state(entry)) - break; - - /* - * Fill in desc fields of the skb descriptor - */ - skbdesc = get_skb_frame_desc(entry->skb); - skbdesc->desc = entry_priv->desc; - skbdesc->desc_len = entry->queue->desc_size; - - /* - * DMA is already done, notify rt2x00lib that - * it finished successfully. - */ - rt2x00lib_dmastart(entry); - rt2x00lib_dmadone(entry); - - /* - * Send the frame to rt2x00lib for further processing. - */ - rt2x00lib_rxdone(entry, GFP_ATOMIC); - } - - return !max_rx; -} -EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); - -void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) -{ - unsigned int i; - - for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) - msleep(10); -} -EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); - -/* - * Device initialization handlers. - */ -static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, - struct data_queue *queue) -{ - struct queue_entry_priv_pci *entry_priv; - void *addr; - dma_addr_t dma; - unsigned int i; - - /* - * Allocate DMA memory for descriptor and buffer. 
- */ - addr = dma_alloc_coherent(rt2x00dev->dev, - queue->limit * queue->desc_size, - &dma, GFP_KERNEL); - if (!addr) - return -ENOMEM; - - memset(addr, 0, queue->limit * queue->desc_size); - - /* - * Initialize all queue entries to contain valid addresses. - */ - for (i = 0; i < queue->limit; i++) { - entry_priv = queue->entries[i].priv_data; - entry_priv->desc = addr + i * queue->desc_size; - entry_priv->desc_dma = dma + i * queue->desc_size; - } - - return 0; -} - -static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, - struct data_queue *queue) -{ - struct queue_entry_priv_pci *entry_priv = - queue->entries[0].priv_data; - - if (entry_priv->desc) - dma_free_coherent(rt2x00dev->dev, - queue->limit * queue->desc_size, - entry_priv->desc, entry_priv->desc_dma); - entry_priv->desc = NULL; -} - -int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - int status; - - /* - * Allocate DMA - */ - queue_for_each(rt2x00dev, queue) { - status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); - if (status) - goto exit; - } - - /* - * Register interrupt handler. - */ - status = request_irq(rt2x00dev->irq, - rt2x00dev->ops->lib->irq_handler, - IRQF_SHARED, rt2x00dev->name, rt2x00dev); - if (status) { - ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", - rt2x00dev->irq, status); - goto exit; - } - - return 0; - -exit: - queue_for_each(rt2x00dev, queue) - rt2x00pci_free_queue_dma(rt2x00dev, queue); - - return status; -} -EXPORT_SYMBOL_GPL(rt2x00pci_initialize); - -void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - - /* - * Free irq line. - */ - free_irq(rt2x00dev->irq, rt2x00dev); - - /* - * Free DMA - */ - queue_for_each(rt2x00dev, queue) - rt2x00pci_free_queue_dma(rt2x00dev, queue); -} -EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); - -/* * PCI driver handlers. */ static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h index e2c99f2b9a14..60d90b20f8b9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h @@ -36,94 +36,6 @@ #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) /* - * Register access. - */ -static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - u32 *value) -{ - *value = readl(rt2x00dev->csr.base + offset); -} - -static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - void *value, const u32 length) -{ - memcpy_fromio(value, rt2x00dev->csr.base + offset, length); -} - -static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - u32 value) -{ - writel(value, rt2x00dev->csr.base + offset); -} - -static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - const void *value, - const u32 length) -{ - __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); -} - -/** - * rt2x00pci_regbusy_read - Read from register with busy check - * @rt2x00dev: Device pointer, see &struct rt2x00_dev. - * @offset: Register offset - * @field: Field to check if register is busy - * @reg: Pointer to where register contents should be stored - * - * This function will read the given register, and checks if the - * register is busy. If it is, it will sleep for a couple of - * microseconds before reading the register again. 
If the register - * is not read after a certain timeout, this function will return - * FALSE. - */ -int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - const struct rt2x00_field32 field, - u32 *reg); - -/** - * struct queue_entry_priv_pci: Per entry PCI specific information - * - * @desc: Pointer to device descriptor - * @desc_dma: DMA pointer to &desc. - * @data: Pointer to device's entry memory. - * @data_dma: DMA pointer to &data. - */ -struct queue_entry_priv_pci { - __le32 *desc; - dma_addr_t desc_dma; -}; - -/** - * rt2x00pci_rxdone - Handle RX done events - * @rt2x00dev: Device pointer, see &struct rt2x00_dev. - * - * Returns true if there are still rx frames pending and false if all - * pending rx frames were processed. - */ -bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00pci_flush_queue - Flush data queue - * @queue: Data queue to stop - * @drop: True to drop all pending frames. - * - * This will wait for a maximum of 100ms, waiting for the queues - * to become empty. - */ -void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); - -/* - * Device initialization handlers. - */ -int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); -void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); - -/* * PCI driver handlers. */ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f95792cfcf89..9e3c8ff53e3f 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -35,6 +35,7 @@ #include <linux/eeprom_93cx6.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt61pci.h" diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index eef38cfd812e..ca33ae193935 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c @@ -22,7 +22,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/gpio.h> -#include <linux/mei_bus.h> +#include <linux/mei_cl_bus.h> #include <linux/nfc.h> #include <net/nfc/hci.h> @@ -32,9 +32,6 @@ #define MICROREAD_DRIVER_NAME "microread" -#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \ - 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) - struct mei_nfc_hdr { u8 cmd; u8 status; @@ -48,7 +45,7 @@ struct mei_nfc_hdr { #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) struct microread_mei_phy { - struct mei_device *mei_device; + struct mei_cl_device *device; struct nfc_hci_dev *hdev; int powered; @@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb) MEI_DUMP_SKB_OUT("mei frame sent", skb); - r = mei_send(phy->device, skb->data, skb->len); + r = mei_cl_send(phy->device, skb->data, skb->len); if (r > 0) r = 0; return r; } -static void microread_event_cb(struct mei_device *device, u32 events, +static void microread_event_cb(struct mei_cl_device *device, u32 events, void *context) { struct microread_mei_phy *phy = context; @@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, if (phy->hard_fault != 0) return; - if (events & BIT(MEI_EVENT_RX)) { + if (events & BIT(MEI_CL_EVENT_RX)) { struct sk_buff *skb; int reply_size; @@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, if (!skb) return; - reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); + reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); if (reply_size < 
MEI_NFC_HEADER_SIZE) { kfree(skb); return; @@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = { .disable = microread_mei_disable, }; -static int microread_mei_probe(struct mei_device *device, - const struct mei_id *id) +static int microread_mei_probe(struct mei_cl_device *device, + const struct mei_cl_device_id *id) { struct microread_mei_phy *phy; int r; @@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device, } phy->device = device; - mei_set_clientdata(device, phy); + mei_cl_set_drvdata(device, phy); - r = mei_register_event_cb(device, microread_event_cb, phy); + r = mei_cl_register_event_cb(device, microread_event_cb, phy); if (r) { pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); goto err_out; @@ -186,9 +183,9 @@ err_out: return r; } -static int microread_mei_remove(struct mei_device *device) +static int microread_mei_remove(struct mei_cl_device *device) { - struct microread_mei_phy *phy = mei_get_clientdata(device); + struct microread_mei_phy *phy = mei_cl_get_drvdata(device); pr_info("Removing microread\n"); @@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device) return 0; } -static struct mei_id microread_mei_tbl[] = { - { MICROREAD_DRIVER_NAME, MICROREAD_UUID }, +static struct mei_cl_device_id microread_mei_tbl[] = { + { MICROREAD_DRIVER_NAME }, /* required last entry */ { } }; - MODULE_DEVICE_TABLE(mei, microread_mei_tbl); -static struct mei_driver microread_driver = { +static struct mei_cl_driver microread_driver = { .id_table = microread_mei_tbl, .name = MICROREAD_DRIVER_NAME, @@ -225,7 +221,7 @@ static int microread_mei_init(void) pr_debug(DRIVER_DESC ": %s\n", __func__); - r = mei_driver_register(µread_driver); + r = mei_cl_driver_register(µread_driver); if (r) { pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); return r; @@ -236,7 +232,7 @@ static int microread_mei_init(void) static void microread_mei_exit(void) { - mei_driver_unregister(µread_driver); + mei_cl_driver_unregister(µread_driver); } module_init(microread_mei_init); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dee5dddaa292..5147c210df52 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) return; } - if (!pci_dev->pm_cap || !pci_dev->pme_support - || pci_check_pme_status(pci_dev)) { - if (pci_dev->pme_poll) - pci_dev->pme_poll = false; + /* Clear PME Status if set. */ + if (pci_dev->pme_support) + pci_check_pme_status(pci_dev); - pci_wakeup_event(pci_dev); - pm_runtime_resume(&pci_dev->dev); - } + if (pci_dev->pme_poll) + pci_dev->pme_poll = false; + + pci_wakeup_event(pci_dev); + pm_runtime_resume(&pci_dev->dev); if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1fa1e482a999..79277fb36c6b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev) /* * Turn off Bus Master bit on the device to tell it to not - * continue to do DMA + * continue to do DMA. Don't touch devices in D3cold or unknown states. 
*/ - pci_clear_master(pci_dev); + if (pci_dev->current_state <= PCI_D3hot) + pci_clear_master(pci_dev); } #ifdef CONFIG_PM diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 08c243ab034e..ed4d09498337 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -185,14 +185,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { #endif /* !PM */ /* - * PCIe port runtime suspend is broken for some chipsets, so use a - * black list to disable runtime PM for these chipsets. - */ -static const struct pci_device_id port_runtime_pm_black_list[] = { - { /* end: all zeroes */ } -}; - -/* * pcie_portdrv_probe - Probe PCI-Express port devices * @dev: PCI-Express port device being probed * @@ -225,16 +217,11 @@ static int pcie_portdrv_probe(struct pci_dev *dev, * it by default. */ dev->d3cold_allowed = false; - if (!pci_match_id(port_runtime_pm_black_list, dev)) - pm_runtime_put_noidle(&dev->dev); - return 0; } static void pcie_portdrv_remove(struct pci_dev *dev) { - if (!pci_match_id(port_runtime_pm_black_list, dev)) - pm_runtime_get_noresume(&dev->dev); pcie_port_device_remove(dev); pci_disable_device(dev); } diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index b41ac7756a4b..c5d0a08a8747 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -100,27 +100,6 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) return min((size_t)(image - rom), size); } -static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) -{ - struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; - loff_t start; - - /* assign the ROM an address if it doesn't have one */ - if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) - return 0; - start = pci_resource_start(pdev, PCI_ROM_RESOURCE); - *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - - if (*size == 0) - return 0; - - /* Enable ROM space decodes */ - if (pci_enable_rom(pdev)) - return 0; - - return start; -} - /** * pci_map_rom - map a PCI ROM to kernel space * @pdev: pointer to pci device struct @@ -135,7 +114,7 @@ static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; - loff_t start = 0; + loff_t start; void __iomem *rom; /* @@ -154,21 +133,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) return (void __iomem *)(unsigned long) pci_resource_start(pdev, PCI_ROM_RESOURCE); } else { - start = pci_find_rom(pdev, size); - } - } + /* assign the ROM an address if it doesn't have one */ + if (res->parent == NULL && + pci_assign_resource(pdev,PCI_ROM_RESOURCE)) + return NULL; + start = pci_resource_start(pdev, PCI_ROM_RESOURCE); + *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + if (*size == 0) + return NULL; - /* - * Some devices may provide ROMs via a source other than the BAR - */ - if (!start && pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt(pdev->rom); + /* Enable ROM space decodes */ + if (pci_enable_rom(pdev)) + return NULL; + } } - if (!start) - return NULL; - rom = ioremap(start, *size); if (!rom) { /* restore enable if ioremap fails */ @@ -202,8 +181,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) return; - if (!pdev->rom || !pdev->romlen) - iounmap(rom); + iounmap(rom); /* Disable again before continuing, leave enabled if pci=rom */ if (!(res->flags & (IORESOURCE_ROM_ENABLE | 
IORESOURCE_ROM_SHADOW))) @@ -227,7 +205,24 @@ void pci_cleanup_rom(struct pci_dev *pdev) } } +/** + * pci_platform_rom - provides a pointer to any ROM image provided by the + * platform + * @pdev: pointer to pci device struct + * @size: pointer to receive size of pci window over ROM + */ +void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) +{ + if (pdev->rom && pdev->romlen) { + *size = pdev->romlen; + return phys_to_virt((phys_addr_t)pdev->rom); + } + + return NULL; +} + EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); EXPORT_SYMBOL_GPL(pci_enable_rom); EXPORT_SYMBOL_GPL(pci_disable_rom); +EXPORT_SYMBOL(pci_platform_rom); diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 45cacf79f3a7..1a779bbfb87d 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = { { KE_KEY, 0x2142, { KEY_MEDIA } }, { KE_KEY, 0x213b, { KEY_INFO } }, { KE_KEY, 0x2169, { KEY_DIRECTION } }, - { KE_KEY, 0x216a, { KEY_SETUP } }, { KE_KEY, 0x231b, { KEY_HELP } }, { KE_END, 0 } }; @@ -925,9 +924,6 @@ static int __init hp_wmi_init(void) err = hp_wmi_input_setup(); if (err) return err; - - //Enable magic for hotkeys that run on the SMBus - ec_write(0xe6,0x6e); } if (bios_capable) { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9a907567f41e..edec135b1685 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1964,9 +1964,6 @@ struct tp_nvram_state { /* kthread for the hotkey poller */ static struct task_struct *tpacpi_hotkey_task; -/* Acquired while the poller kthread is running, use to sync start/stop */ -static struct mutex hotkey_thread_mutex; - /* * Acquire mutex to write poller control variables as an * atomic block. 
@@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data) unsigned int poll_freq; bool was_frozen; - mutex_lock(&hotkey_thread_mutex); - if (tpacpi_lifecycle == TPACPI_LIFE_EXITING) goto exit; @@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data) } exit: - mutex_unlock(&hotkey_thread_mutex); return 0; } @@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void) if (tpacpi_hotkey_task) { kthread_stop(tpacpi_hotkey_task); tpacpi_hotkey_task = NULL; - mutex_lock(&hotkey_thread_mutex); - /* at this point, the thread did exit */ - mutex_unlock(&hotkey_thread_mutex); } } @@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) mutex_init(&hotkey_mutex); #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL - mutex_init(&hotkey_thread_mutex); mutex_init(&hotkey_thread_data_mutex); #endif diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index cc1f7bf53fd0..c6d77e20622c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -4,7 +4,7 @@ menu "Remoteproc drivers" config REMOTEPROC tristate depends on HAS_DMA - select FW_CONFIG + select FW_LOADER select VIRTIO config OMAP_REMOTEPROC diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 29387df4bfc9..8edb4aed5d36 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) * TODO: support predefined notifyids (via resource table) */ ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); - if (ret) { + if (ret < 0) { dev_err(dev, "idr_alloc failed: %d\n", ret); dma_free_coherent(dev->parent, size, va, dma); return ret; @@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* it is now safe to add the virtio device */ ret = rproc_add_virtio_dev(rvdev, rsc->id); if (ret) - goto free_rvdev; + goto remove_rvdev; return 0; +remove_rvdev: + list_del(&rvdev->node); free_rvdev: kfree(rvdev); return ret; diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c index a7743c069339..fb95c4220052 100644 --- a/drivers/remoteproc/ste_modem_rproc.c +++ b/drivers/remoteproc/ste_modem_rproc.c @@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev) /* Unregister as remoteproc device */ rproc_del(sproc->rproc); + dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, + sproc->fw_addr, sproc->fw_dma_addr); rproc_put(sproc->rproc); mdev->drv_data = NULL; @@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev) /* Register as a remoteproc device */ err = rproc_add(rproc); if (err) - goto free_rproc; + goto free_mem; return 0; +free_mem: + dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, + sproc->fw_addr, sproc->fw_dma_addr); free_rproc: /* Reset device data upon error */ mdev->drv_data = NULL; diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 0a9f27e094ea..434ebc3a99dc 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -44,7 +44,6 @@ static DECLARE_COMPLETION(at91_rtc_updated); static unsigned int at91_alarm_year = AT91_RTC_EPOCH; static void __iomem *at91_rtc_regs; static int irq; -static u32 at91_rtc_imr; /* * Decode time/date into rtc_time structure @@ -109,11 +108,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) cr = at91_rtc_read(AT91_RTC_CR); at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); - at91_rtc_imr |= AT91_RTC_ACKUPD; 
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); - at91_rtc_imr &= ~AT91_RTC_ACKUPD; at91_rtc_write(AT91_RTC_TIMR, bin2bcd(tm->tm_sec) << 0 @@ -145,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_year = at91_alarm_year - 1900; - alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) + alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) ? 1 : 0; dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, @@ -171,7 +168,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) tm.tm_sec = alrm->time.tm_sec; at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); - at91_rtc_imr &= ~AT91_RTC_ALARM; at91_rtc_write(AT91_RTC_TIMALR, bin2bcd(tm.tm_sec) << 0 | bin2bcd(tm.tm_min) << 8 @@ -184,7 +180,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) if (alrm->enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); - at91_rtc_imr |= AT91_RTC_ALARM; at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); } @@ -201,12 +196,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) if (enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); - at91_rtc_imr |= AT91_RTC_ALARM; at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); - } else { + } else at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); - at91_rtc_imr &= ~AT91_RTC_ALARM; - } return 0; } @@ -215,10 +207,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) */ static int at91_rtc_proc(struct device *dev, struct seq_file *seq) { + unsigned long imr = at91_rtc_read(AT91_RTC_IMR); + seq_printf(seq, "update_IRQ\t: %s\n", - (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); + (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); seq_printf(seq, "periodic_IRQ\t: %s\n", - (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); + (imr & AT91_RTC_SECEV) ? "yes" : "no"); return 0; } @@ -233,7 +227,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) unsigned int rtsr; unsigned long events = 0; - rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; + rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); if (rtsr) { /* this interrupt is shared! Is it ours? */ if (rtsr & AT91_RTC_ALARM) events |= (RTC_AF | RTC_IRQF); @@ -297,7 +291,6 @@ static int __init at91_rtc_probe(struct platform_device *pdev) at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); - at91_rtc_imr = 0; ret = request_irq(irq, at91_rtc_interrupt, IRQF_SHARED, @@ -336,7 +329,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); - at91_rtc_imr = 0; free_irq(irq, pdev); rtc_device_unregister(rtc); @@ -349,35 +341,31 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) /* AT91RM9200 RTC Power management control */ -static u32 at91_rtc_bkpimr; - +static u32 at91_rtc_imr; static int at91_rtc_suspend(struct device *dev) { /* this IRQ is shared with DBGU and other hardware which isn't * necessarily doing PM like we are... 
*/ - at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); - if (at91_rtc_bkpimr) { - if (device_may_wakeup(dev)) { + at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) + & (AT91_RTC_ALARM|AT91_RTC_SECEV); + if (at91_rtc_imr) { + if (device_may_wakeup(dev)) enable_irq_wake(irq); - } else { - at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); - at91_rtc_imr &= ~at91_rtc_bkpimr; - } -} + else + at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); + } return 0; } static int at91_rtc_resume(struct device *dev) { - if (at91_rtc_bkpimr) { - if (device_may_wakeup(dev)) { + if (at91_rtc_imr) { + if (device_may_wakeup(dev)) disable_irq_wake(irq); - } else { - at91_rtc_imr |= at91_rtc_bkpimr; - at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr); - } + else + at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); } return 0; } diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h index 5f940b6844cb..da1945e5f714 100644 --- a/drivers/rtc/rtc-at91rm9200.h +++ b/drivers/rtc/rtc-at91rm9200.h @@ -64,6 +64,7 @@ #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ +#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */ #define AT91_RTC_VER 0x2c /* Valid Entry Register */ #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 5ac9c935c151..e9b9c8392832 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq) case EQC_WR_PROHIBIT: spin_lock_irqsave(&bdev->lock, flags); if (bdev->state != SCM_WR_PROHIBIT) - pr_info("%lu: Write access to the SCM increment is suspended\n", + pr_info("%lx: Write access to the SCM increment is suspended\n", (unsigned long) bdev->scmdev->address); bdev->state = SCM_WR_PROHIBIT; spin_unlock_irqrestore(&bdev->lock, flags); @@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev) spin_lock_irqsave(&bdev->lock, flags); if (bdev->state == SCM_WR_PROHIBIT) - pr_info("%lu: Write access to the SCM increment is restored\n", + pr_info("%lx: Write access to the SCM increment is restored\n", (unsigned long) bdev->scmdev->address); bdev->state = SCM_OPER; spin_unlock_irqrestore(&bdev->lock, flags); @@ -463,12 +463,15 @@ static int __init scm_blk_init(void) goto out; scm_major = ret; - if (scm_alloc_rqs(nr_requests)) + ret = scm_alloc_rqs(nr_requests); + if (ret) goto out_unreg; scm_debug = debug_register("scm_log", 16, 1, 16); - if (!scm_debug) + if (!scm_debug) { + ret = -ENOMEM; goto out_free; + } debug_register_view(scm_debug, &debug_hex_ascii_view); debug_set_level(scm_debug, 2); diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 5f6180d6ff08..c98cf52d78d1 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c @@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event) switch (event) { case SCM_CHANGE: - pr_info("%lu: The capabilities of the SCM increment changed\n", + pr_info("%lx: The capabilities of the SCM increment changed\n", (unsigned long) scmdev->address); SCM_LOG(2, "State changed"); SCM_LOG_STATE(2, scmdev); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index b907dba24025..cee69dac3e18 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) int i, rc; 
/* Check if the tty3270 is already there. */ - view = raw3270_find_view(&tty3270_fn, tty->index); + view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); if (!IS_ERR(view)) { tp = container_of(view, struct tty3270, view); tty->driver_data = tp; @@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) tp->inattr = TF_INPUT; return tty_port_install(&tp->port, driver, tty); } - if (tty3270_max_index < tty->index) - tty3270_max_index = tty->index; + if (tty3270_max_index < tty->index + 1) + tty3270_max_index = tty->index + 1; /* Allocate tty3270 structure on first open. */ tp = tty3270_alloc_view(); if (IS_ERR(tp)) return PTR_ERR(tp); - rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); + rc = raw3270_add_view(&tp->view, &tty3270_fn, + tty->index + RAW3270_FIRSTMINOR); if (rc) { tty3270_free_view(tp); return rc; @@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = { void tty3270_create_cb(int minor) { - tty_register_device(tty3270_driver, minor, NULL); + tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); } void tty3270_destroy_cb(int minor) { - tty_unregister_device(tty3270_driver, minor); + tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); } struct raw3270_notifier tty3270_notifier = @@ -1884,7 +1885,8 @@ static int __init tty3270_init(void) driver->driver_name = "tty3270"; driver->name = "3270/tty"; driver->major = IBM_TTY3270_MAJOR; - driver->minor_start = 0; + driver->minor_start = RAW3270_FIRSTMINOR; + driver->name_base = RAW3270_FIRSTMINOR; driver->type = TTY_DRIVER_TYPE_SYSTEM; driver->subtype = SYSTEM_TYPE_TTY; driver->init_termios = tty_std_termios; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 8c0622399fcd..6ccb7457746b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -769,6 +769,7 @@ struct qeth_card { unsigned long thread_start_mask; unsigned long thread_allowed_mask; unsigned long thread_running_mask; + struct task_struct *recovery_task; spinlock_t ip_lock; struct list_head ip_list; struct list_head *ip_tbd_list; @@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list; extern struct kmem_cache *qeth_core_header_cache; extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; +void qeth_set_recovery_task(struct qeth_card *); +void qeth_clear_recovery_task(struct qeth_card *); void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); int qeth_threads_running(struct qeth_card *, unsigned long); int qeth_wait_for_threads(struct qeth_card *, unsigned long); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0d73a999983d..451f92020599 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card) return "n/a"; } +void qeth_set_recovery_task(struct qeth_card *card) +{ + card->recovery_task = current; +} +EXPORT_SYMBOL_GPL(qeth_set_recovery_task); + +void qeth_clear_recovery_task(struct qeth_card *card) +{ + card->recovery_task = NULL; +} +EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); + +static bool qeth_is_recovery_task(const struct qeth_card *card) +{ + return card->recovery_task == current; +} + void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, int clear_start_mask) { @@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) { + 
if (qeth_is_recovery_task(card)) + return 0; return wait_event_interruptible(card->wait_q, qeth_threads_running(card, threads) == 0); } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efeaf..155b101bd730 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); + qeth_set_recovery_task(card); __qeth_l2_set_offline(card->gdev, 1); rc = __qeth_l2_set_online(card->gdev, 1); if (!rc) @@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr) dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); } + qeth_clear_recovery_task(card); qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8710337dab3e..1f7edf1b26c3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); + qeth_set_recovery_task(card); __qeth_l3_set_offline(card->gdev, 1); rc = __qeth_l3_set_online(card->gdev, 1); if (!rc) @@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr) dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); } + qeth_clear_recovery_task(card); qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 1a9d1e3ce64c..c1441ed282eb 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c @@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void __init reset_one_i2c(struct bbc_i2c_bus *bp) +static void reset_one_i2c(struct bbc_i2c_bus *bp) { writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); writeb(bp->own, bp->i2c_control_regs + 0x1); @@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp) writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); } -static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index) +static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index) { struct bbc_i2c_bus *bp; struct device_node *dp; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2daf4b0da434..90bc7bd00966 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) fc_exch_init(lport); fc_rport_init(lport); fc_disc_init(lport); + fc_disc_config(lport, lport); return 0; } @@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev, } ctlr = bnx2fc_to_ctlr(interface); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); interface->vlan_id = vlan_id; interface->timer_work_queue = @@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev, goto ifput_err; } - lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); + lport = bnx2fc_if_create(interface, &cdev->dev, 0); if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); @@ -2159,8 +2161,6 @@ static int 
_bnx2fc_create(struct net_device *netdev, /* Make this master N_port */ ctlr->lp = lport; - cdev = fcoe_ctlr_to_ctlr_dev(ctlr); - if (link_state == BNX2FC_CREATE_LINK_UP) cdev->enabled = FCOE_CTLR_ENABLED; else diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b5d92fc93c70..9bfdc9a3f897 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) { struct net_device *netdev = fcoe->netdev; struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); - struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); rtnl_lock(); if (!fcoe->removed) @@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) /* tear-down the FCoE controller */ fcoe_ctlr_destroy(fip); scsi_host_put(fip->lp->host); - fcoe_ctlr_device_delete(ctlr_dev); dev_put(netdev); module_put(THIS_MODULE); } @@ -2194,6 +2192,8 @@ out_nodev: */ static void fcoe_destroy_work(struct work_struct *work) { + struct fcoe_ctlr_device *cdev; + struct fcoe_ctlr *ctlr; struct fcoe_port *port; struct fcoe_interface *fcoe; struct Scsi_Host *shost; @@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work) mutex_lock(&fcoe_config_mutex); fcoe = port->priv; + ctlr = fcoe_to_ctlr(fcoe); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + fcoe_if_destroy(port->lport); fcoe_interface_cleanup(fcoe); mutex_unlock(&fcoe_config_mutex); + + fcoe_ctlr_device_delete(cdev); } /** @@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, rc = -EIO; rtnl_unlock(); fcoe_interface_cleanup(fcoe); - goto out_nortnl; + mutex_unlock(&fcoe_config_mutex); + fcoe_ctlr_device_delete(ctlr_dev); + goto out; } /* Make this the "master" N_Port */ @@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, out_nodev: rtnl_unlock(); -out_nortnl: mutex_unlock(&fcoe_config_mutex); +out: return rc; } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 08c3bc398da2..a76247201be5 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -2815,6 +2815,47 @@ unlock: } /** + * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode + * @lport: The local port to be (re)configured + * @fip: The FCoE controller whose mode is changing + * @fip_mode: The new fip mode + * + * Note that the we shouldn't be changing the libfc discovery settings + * (fc_disc_config) while an lport is going through the libfc state + * machine. The mode can only be changed when a fcoe_ctlr device is + * disabled, so that should ensure that this routine is only called + * when nothing is happening. 
+ */ +void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, + enum fip_state fip_mode) +{ + void *priv; + + WARN_ON(lport->state != LPORT_ST_RESET && + lport->state != LPORT_ST_DISABLED); + + if (fip_mode == FIP_MODE_VN2VN) { + lport->rport_priv_size = sizeof(struct fcoe_rport); + lport->point_to_multipoint = 1; + lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; + lport->tt.disc_start = fcoe_ctlr_disc_start; + lport->tt.disc_stop = fcoe_ctlr_disc_stop; + lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; + priv = fip; + } else { + lport->rport_priv_size = 0; + lport->point_to_multipoint = 0; + lport->tt.disc_recv_req = NULL; + lport->tt.disc_start = NULL; + lport->tt.disc_stop = NULL; + lport->tt.disc_stop_final = NULL; + priv = lport; + } + + fc_disc_config(lport, priv); +} + +/** * fcoe_libfc_config() - Sets up libfc related properties for local port * @lport: The local port to configure libfc for * @fip: The FCoE controller in use by the local port @@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, fc_exch_init(lport); fc_elsct_init(lport); fc_lport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) - lport->rport_priv_size = sizeof(struct fcoe_rport); fc_rport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) { - lport->point_to_multipoint = 1; - lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; - lport->tt.disc_start = fcoe_ctlr_disc_start; - lport->tt.disc_stop = fcoe_ctlr_disc_stop; - lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; - mutex_init(&lport->disc.disc_mutex); - INIT_LIST_HEAD(&lport->disc.rports); - lport->disc.priv = fip; - } else { - fc_disc_init(lport); - } + fc_disc_init(lport); + fcoe_ctlr_mode_set(lport, fip, fip->mode); return 0; } EXPORT_SYMBOL_GPL(fcoe_libfc_config); @@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected); void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) { struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fc_lport *lport = ctlr->lp; mutex_lock(&ctlr->ctlr_mutex); switch (ctlr_dev->mode) { @@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) } mutex_unlock(&ctlr->ctlr_mutex); + + fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); } EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index a044f593e8b9..d0fa4b6c551f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) sdev->allow_restart = 1; blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } - scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); spin_unlock_irqrestore(shost->host_lock, lock_flags); + scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); return 0; } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f328089a1060..2197b57fb225 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) ipr_trace; } - list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; @@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + if (ioa_cfg->intr_flag == IPR_USE_MSIX) + rc = 
request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + else + rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); if (rc) { dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); return rc; @@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - free_irq(pdev->irq, ioa_cfg); + if (ioa_cfg->intr_flag == IPR_USE_MSIX) + free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); + else + free_irq(pdev->irq, ioa_cfg); LEAVE; @@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); flush_work(&ioa_cfg->work_q); + INIT_LIST_HEAD(&ioa_cfg->used_res_q); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); spin_lock(&ipr_driver_lock); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8e561e6a557c..880a9068ca12 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport) } /** - * fc_disc_init() - Initialize the discovery layer for a local port - * @lport: The local port that needs the discovery layer to be initialized + * fc_disc_config() - Configure the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be configured + * @priv: Private data structre for users of the discovery layer */ -int fc_disc_init(struct fc_lport *lport) +void fc_disc_config(struct fc_lport *lport, void *priv) { - struct fc_disc *disc; + struct fc_disc *disc = &lport->disc; if (!lport->tt.disc_start) lport->tt.disc_start = fc_disc_start; @@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport) lport->tt.disc_recv_req = fc_disc_recv_req; disc = &lport->disc; + + disc->priv = priv; +} +EXPORT_SYMBOL(fc_disc_config); + +/** + * fc_disc_init() - Initialize the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be initialized + */ +void fc_disc_init(struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); mutex_init(&disc->disc_mutex); INIT_LIST_HEAD(&disc->rports); - - disc->priv = lport; - - return 0; } EXPORT_SYMBOL(fc_disc_init); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index aec2e0da5016..55cbd0180159 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + /* Handle vacant phy - rest of dr data is not valid so skip it */ + if (phy->phy_state == PHY_VACANT) { + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + phy->attached_dev_type = NO_DEVICE; + if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { + phy->phy_id = phy_id; + goto skip; + } else + goto out; + } + phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; @@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; + skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); @@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single) if 
(!disc_req) return -ENOMEM; - disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 74b67d98e952..d43faf34c1e2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *temp_hrqe; struct lpfc_rqe *temp_drqe; struct lpfc_register doorbell; - int put_index = hq->host_index; + int put_index; /* sanity check on queue memory */ if (unlikely(!hq) || unlikely(!dq)) return -ENOMEM; + put_index = hq->host_index; temp_hrqe = hq->qe[hq->host_index].rqe; temp_drqe = dq->qe[dq->host_index].rqe; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1d82eef4e1eb..b3db9dcc2619 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) "Timer for the VP[%d] has stopped\n", vha->vp_idx); } - /* No pending activities shall be there on the vha now */ - if (ql2xextended_error_logging & ql_dbg_user) - msleep(random32()%10); /* Just to see if something falls on - * the net we have placed below */ - BUG_ON(atomic_read(&vha->vref_count)); qla2x00_free_fcports(vha); diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 1626de52e32a..fbc305f1c87c 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -15,6 +15,7 @@ * | Mailbox commands | 0x115b | 0x111a-0x111b | * | | | 0x112c-0x112e | * | | | 0x113a | + * | | | 0x1155-0x1158 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | * | | | 0x2016 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | @@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, void *ring; } aq, *aqp; - if (!ha->tgt.atio_q_length) + if (!ha->tgt.atio_ring) return ptr; num_queues = 1; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c6509911772b..65c5ff75936b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -863,7 +863,6 @@ typedef struct { #define MBX_1 BIT_1 #define MBX_0 BIT_0 -#define RNID_TYPE_SET_VERSION 0x9 #define RNID_TYPE_ASIC_TEMP 0xC /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index eb3ca21a7f17..b310fa97b545 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -358,9 +358,6 @@ extern int qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); extern int -qla2x00_set_driver_version(scsi_qla_host_t *, char *); - -extern int qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, uint16_t, uint16_t, uint16_t, uint16_t); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index edf4d14a1335..b59203393cb2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) qla24xx_read_fcp_prio_cfg(vha); - qla2x00_set_driver_version(vha, QLA2XXX_VERSION); - return (rval); } @@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mq_size += ha->max_rsp_queues * (rsp->length * sizeof(response_t)); } - if (ha->tgt.atio_q_length) + if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); /* Allocate memory for Fibre Channel Event Buffer. 
*/ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 186dd59ce4fa..43345af56431 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) return rval; } -int -qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) -{ - int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; - int len; - uint16_t dwlen; - uint8_t *str; - dma_addr_t str_dma; - struct qla_hw_data *ha = vha->hw; - - if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) - return QLA_FUNCTION_FAILED; - - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, - "Entered %s.\n", __func__); - - str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); - if (!str) { - ql_log(ql_log_warn, vha, 0x1156, - "Failed to allocate driver version param.\n"); - return QLA_MEMORY_ALLOC_FAILED; - } - - memcpy(str, "\x7\x3\x11\x0", 4); - dwlen = str[0]; - len = dwlen * sizeof(uint32_t) - 4; - memset(str + 4, 0, len); - if (len > strlen(version)) - len = strlen(version); - memcpy(str + 4, version, len); - - mcp->mb[0] = MBC_SET_RNID_PARAMS; - mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; - mcp->mb[2] = MSW(LSD(str_dma)); - mcp->mb[3] = LSW(LSD(str_dma)); - mcp->mb[6] = MSW(MSD(str_dma)); - mcp->mb[7] = LSW(MSD(str_dma)); - mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); - - if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x1157, - "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); - } else { - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, - "Done %s.\n", __func__); - } - - dma_pool_free(ha->s_dma_pool, str, str_dma); - - return rval; -} - static int qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) { diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 2b6e478d9e33..ec54036d1e12 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.04.00.08-k" +#define QLA2XXX_VERSION "8.04.00.13-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 4 diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 86974471af68..2a32036a9404 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev) tpnt->disk = disk; disk->private_data = &tpnt->driver; disk->queue = SDp->request_queue; + /* SCSI tape doesn't register this gendisk via add_disk(). Manually + * take queue reference that release_disk() expects. 
*/ + if (!blk_get_queue(disk->queue)) + goto out_put_disk; tpnt->driver = &st_template; tpnt->device = SDp; @@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev) idr_preload_end(); if (error < 0) { pr_warn("st: idr allocation failed: %d\n", error); - goto out_put_disk; + goto out_put_queue; } tpnt->index = error; sprintf(disk->disk_name, "st%d", tpnt->index); @@ -4211,6 +4215,8 @@ out_remove_devs: spin_lock(&st_index_lock); idr_remove(&st_index_idr, tpnt->index); spin_unlock(&st_index_lock); +out_put_queue: + blk_put_queue(disk->queue); out_put_disk: put_disk(disk); kfree(tpnt); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f80eee74a311..2be0de920d67 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers" config SPI_ALTERA tristate "Altera SPI Controller" + depends on GENERIC_HARDIRQS select SPI_BITBANG help This is the driver for the Altera SPI Controller. @@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA config SPI_PXA2XX tristate "PXA2xx SSP SPI master" - depends on ARCH_PXA || PCI || ACPI + depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS select PXA_SSP if ARCH_PXA help This enables using a PXA2xx or Sodaville SSP port as a SPI master diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 9578af782a77..d7df435d962e 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi, static int bcm63xx_spi_setup(struct spi_device *spi) { struct bcm63xx_spi *bs; - int ret; bs = spi_master_get_devdata(spi->master); @@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) default: dev_err(dev, "unsupported MSG_CTL width: %d\n", bs->msg_ctl_width); - goto out_clk_disable; + goto out_err; } /* Initialize hardware */ diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 89480b281d74..3e490ee7f275 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c @@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, for (i = count; i > 0; i--) { data = tx_buf ? 
*tx_buf++ : 0; - if (len == EOFBYTE) + if (len == EOFBYTE && t->cs_change) setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); out_8(&fifo->txdata_8, data); len--; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 90b27a3508a6..810413883c79 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) master->dev.parent = &pdev->dev; master->dev.of_node = pdev->dev.of_node; - ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e862ab8853aa..4188b2faac5c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -994,25 +994,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data) { struct s3c64xx_spi_driver_data *sdd = data; struct spi_master *spi = sdd->master; - unsigned int val; + unsigned int val, clr = 0; - val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); + val = readl(sdd->regs + S3C64XX_SPI_STATUS); - val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | - S3C64XX_SPI_PND_RX_UNDERRUN_CLR | - S3C64XX_SPI_PND_TX_OVERRUN_CLR | - S3C64XX_SPI_PND_TX_UNDERRUN_CLR; - - writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); - - if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) + if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { + clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR; dev_err(&spi->dev, "RX overrun\n"); - if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) + } + if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { + clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; dev_err(&spi->dev, "RX underrun\n"); - if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) + } + if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { + clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; dev_err(&spi->dev, "TX overrun\n"); - if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) + } + if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { + clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; dev_err(&spi->dev, "TX underrun\n"); + } + + /* Clear the pending irq by setting and then clearing it */ + writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); + writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); return IRQ_HANDLED; } @@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) writel(0, regs + S3C64XX_SPI_MODE_CFG); writel(0, regs + S3C64XX_SPI_PACKET_CNT); - /* Clear any irq pending bits */ - writel(readl(regs + S3C64XX_SPI_PENDING_CLR), - regs + S3C64XX_SPI_PENDING_CLR); + /* Clear any irq pending bits, should set and clear the bits */ + val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | + S3C64XX_SPI_PND_RX_UNDERRUN_CLR | + S3C64XX_SPI_PND_TX_OVERRUN_CLR | + S3C64XX_SPI_PND_TX_UNDERRUN_CLR; + writel(val, regs + S3C64XX_SPI_PENDING_CLR); + writel(0, regs + S3C64XX_SPI_PENDING_CLR); writel(0, regs + S3C64XX_SPI_SWAP_CFG); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index b8698b389ef3..a829563f4713 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi) return 0; } -static int tegra_slink_prepare_transfer(struct spi_master *master) -{ - struct tegra_slink_data *tspi = spi_master_get_devdata(master); - - return pm_runtime_get_sync(tspi->dev); -} - -static int tegra_slink_unprepare_transfer(struct spi_master *master) -{ - struct tegra_slink_data *tspi = spi_master_get_devdata(master); - - pm_runtime_put(tspi->dev); - return 0; -} - static int 
tegra_slink_transfer_one_message(struct spi_master *master, struct spi_message *msg) { @@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, msg->status = 0; msg->actual_length = 0; + ret = pm_runtime_get_sync(tspi->dev); + if (ret < 0) { + dev_err(tspi->dev, "runtime get failed: %d\n", ret); + goto done; + } + single_xfer = list_is_singular(&msg->transfers); list_for_each_entry(xfer, &msg->transfers, transfer_list) { INIT_COMPLETION(tspi->xfer_completion); @@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, exit: tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); + pm_runtime_put(tspi->dev); +done: msg->status = ret; spi_finalize_current_message(master); return ret; @@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev) /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->setup = tegra_slink_setup; - master->prepare_transfer_hardware = tegra_slink_prepare_transfer; master->transfer_one_message = tegra_slink_transfer_one_message; - master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer; master->num_chipselect = MAX_CHIP_SELECT; master->bus_num = -1; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f996c600eb8c..004b10f184d4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work) /* Lock queue and check for queue work */ spin_lock_irqsave(&master->queue_lock, flags); if (list_empty(&master->queue) || !master->running) { - if (master->busy && master->unprepare_transfer_hardware) { - ret = master->unprepare_transfer_hardware(master); - if (ret) { - spin_unlock_irqrestore(&master->queue_lock, flags); - dev_err(&master->dev, - "failed to unprepare transfer hardware\n"); - return; - } + if (!master->busy) { + spin_unlock_irqrestore(&master->queue_lock, flags); + return; } master->busy = false; spin_unlock_irqrestore(&master->queue_lock, flags); + if (master->unprepare_transfer_hardware && + master->unprepare_transfer_hardware(master)) + dev_err(&master->dev, + "failed to unprepare transfer hardware\n"); return; } @@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master) acpi_status status; acpi_handle handle; - handle = ACPI_HANDLE(&master->dev); + handle = ACPI_HANDLE(master->dev.parent); if (!handle) return; diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 4c0f6d883dd3..7b0bce936762 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) return 0; } } + +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) +{ + u32 pmu_ctl = 0; + + switch (cc->dev->bus->chip_id) { + case 0x4322: + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); + if (spuravoid == 1) + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); + else + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); + pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; + break; + case 43222: + /* TODO: BCM43222 requires updating PLLs too */ + return; + default: + ssb_printk(KERN_ERR PFX + "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", + cc->dev->bus->chip_id); + return; 
+ } + + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); +} +EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index ff1c5ee352cb..cbe48ab41745 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -409,6 +409,7 @@ static inline int core_alua_state_standby( case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: @@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: @@ -491,6 +493,7 @@ static inline int core_alua_state_transition( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 484b6a3c9b03..302909ccf183 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev, mxvar_sdriver, brd->idx + i, &pdev->dev); if (IS_ERR(tty_dev)) { retval = PTR_ERR(tty_dev); - for (i--; i >= 0; i--) + for (; i > 0; i--) tty_unregister_device(mxvar_sdriver, - brd->idx + i); + brd->idx + i - 1); goto err_relbrd; } } @@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void) tty_dev = tty_port_register_device(&brd->ports[i].port, mxvar_sdriver, brd->idx + i, NULL); if (IS_ERR(tty_dev)) { - for (i--; i >= 0; i--) + for (; i > 0; i--) tty_unregister_device(mxvar_sdriver, - brd->idx + i); + brd->idx + i - 1); for (i = 0; i < brd->info->nports; i++) tty_port_destroy(&brd->ports[i].port); free_irq(brd->irq, brd); diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c index b3455a970a1d..35d9ab95c5cb 100644 --- a/drivers/tty/serial/8250/8250_pnp.c +++ b/drivers/tty/serial/8250/8250_pnp.c @@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) { struct uart_8250_port uart; int ret, line, flags = dev_id->driver_data; - struct resource *res = NULL; if (flags & UNKNOWN_DEV) { ret = serial_pnp_guess_board(dev); @@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) memset(&uart, 0, sizeof(uart)); if (pnp_irq_valid(dev, 0)) uart.port.irq = pnp_irq(dev, 0); - if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) - res = pnp_get_resource(dev, IORESOURCE_IO, 2); - else if (pnp_port_valid(dev, 0)) - res = pnp_get_resource(dev, IORESOURCE_IO, 0); - if (pnp_resource_enabled(res)) { - uart.port.iobase = res->start; + if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) { + uart.port.iobase = pnp_port_start(dev, 2); + uart.port.iotype = UPIO_PORT; + } else if (pnp_port_valid(dev, 0)) { + uart.port.iobase = pnp_port_start(dev, 0); uart.port.iotype = UPIO_PORT; } else if (pnp_mem_valid(dev, 0)) { uart.port.mapbase = pnp_mem_start(dev, 0); diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 4dc41408ecb7..30d4f7a783cd 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); /* FIFO ENABLE, DMA MODE */ + up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK; + /* + * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK + * sets Enables the granularity of 1 for TRIGGER RX + * level. 
Along with setting RX FIFO trigger level + * to 1 (as noted below, 16 characters) and TLR[3:0] + * to zero this will result RX FIFO threshold level + * to 1 character, instead of 16 as noted in comment + * below. + */ + /* Set receive FIFO threshold to 16 characters and * transmit FIFO threshold to 16 spaces */ diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 05400acbc456..b0452688308c 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty) EXPORT_SYMBOL(start_tty); +static void tty_update_time(struct timespec *time) +{ + unsigned long sec = get_seconds(); + sec -= sec % 60; + if ((long)(sec - time->tv_sec) > 0) + time->tv_sec = sec; +} + /** * tty_read - read method for tty device files * @file: pointer to tty file @@ -960,10 +968,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int i; + struct inode *inode = file_inode(file); struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; - if (tty_paranoia_check(tty, file_inode(file), "tty_read")) + if (tty_paranoia_check(tty, inode, "tty_read")) return -EIO; if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) return -EIO; @@ -977,6 +986,9 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, i = -EIO; tty_ldisc_deref(ld); + if (i > 0) + tty_update_time(&inode->i_atime); + return i; } @@ -1077,8 +1089,10 @@ static inline ssize_t do_tty_write( break; cond_resched(); } - if (written) + if (written) { + tty_update_time(&file_inode(file)->i_mtime); ret = written; + } out: tty_write_unlock(tty); return ret; diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 797f9d514732..65d4e55552c6 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev) { struct usb_port *port_dev = to_usb_port(dev); - dev_pm_qos_hide_flags(dev); kfree(port_dev); } diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 8189cb6a86af..7abc5c81af2c 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data, if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { size_t size; + int max = vfio_pci_get_irq_count(vdev, hdr.index); if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) size = sizeof(uint8_t); @@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data, return -EINVAL; if (hdr.argsz - minsz < hdr.count * size || - hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) + hdr.start >= max || hdr.start + hdr.count > max) return -EINVAL; data = memdup_user((void __user *)(arg + minsz), diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index 2968b4934659..957a0b98a5d9 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c @@ -74,9 +74,8 @@ enum { struct vhost_scsi { /* Protected by vhost_scsi->dev.mutex */ - struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET]; + struct tcm_vhost_tpg **vs_tpg; char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; - bool vs_endpoint; struct vhost_dev dev; struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; @@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work) } } +static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, + struct vhost_virtqueue *vq, int head, unsigned out) +{ + struct virtio_scsi_cmd_resp __user *resp; + struct virtio_scsi_cmd_resp rsp; + int ret; + + memset(&rsp, 0, sizeof(rsp)); + rsp.response = VIRTIO_SCSI_S_BAD_TARGET; + resp = 
vq->iov[out].iov_base; + ret = __copy_to_user(resp, &rsp, sizeof(rsp)); + if (!ret) + vhost_add_used_and_signal(&vs->dev, vq, head, 0); + else + pr_err("Faulted on virtio_scsi_cmd_resp\n"); +} + static void vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) { + struct tcm_vhost_tpg **vs_tpg; struct virtio_scsi_cmd_req v_req; struct tcm_vhost_tpg *tv_tpg; struct tcm_vhost_cmd *tv_cmd; @@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, int head, ret; u8 target; - /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ - if (unlikely(!vs->vs_endpoint)) + /* + * We can handle the vq only after the endpoint is setup by calling the + * VHOST_SCSI_SET_ENDPOINT ioctl. + * + * TODO: Check that we are running from vhost_worker which acts + * as read-side critical section for vhost kind of RCU. + * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h + */ + vs_tpg = rcu_dereference_check(vq->private_data, 1); + if (!vs_tpg) return; mutex_lock(&vq->mutex); @@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, /* Extract the tpgt */ target = v_req.lun[1]; - tv_tpg = vs->vs_tpg[target]; + tv_tpg = ACCESS_ONCE(vs_tpg[target]); /* Target does not exist, fail the request */ if (unlikely(!tv_tpg)) { - struct virtio_scsi_cmd_resp __user *resp; - struct virtio_scsi_cmd_resp rsp; - - memset(&rsp, 0, sizeof(rsp)); - rsp.response = VIRTIO_SCSI_S_BAD_TARGET; - resp = vq->iov[out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) - vhost_add_used_and_signal(&vs->dev, - vq, head, 0); - else - pr_err("Faulted on virtio_scsi_cmd_resp\n"); - + vhost_scsi_send_bad_target(vs, vq, head, out); continue; } @@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, if (IS_ERR(tv_cmd)) { vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", PTR_ERR(tv_cmd)); - break; + goto err_cmd; } pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" ": %d\n", tv_cmd, exp_data_len, data_direction); tv_cmd->tvc_vhost = vs; tv_cmd->tvc_vq = vq; - - if (unlikely(vq->iov[out].iov_len != - sizeof(struct virtio_scsi_cmd_resp))) { - vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" - " bytes, out: %d, in: %d\n", - vq->iov[out].iov_len, out, in); - break; - } - tv_cmd->tvc_resp = vq->iov[out].iov_base; /* @@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(tv_cmd->tvc_cdb), TCM_VHOST_MAX_CDB_SIZE); - break; /* TODO */ + goto err_free; } tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; @@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, data_direction == DMA_TO_DEVICE); if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); - break; /* TODO */ + goto err_free; } } @@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, } mutex_unlock(&vq->mutex); + return; + +err_free: + vhost_scsi_free_cmd(tv_cmd); +err_cmd: + vhost_scsi_send_bad_target(vs, vq, head, out); + mutex_unlock(&vq->mutex); } static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) @@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work) vhost_scsi_handle_vq(vs, vq); } +static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) +{ + vhost_poll_flush(&vs->dev.vqs[index].poll); +} + +static void vhost_scsi_flush(struct vhost_scsi *vs) +{ + int i; + + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) + vhost_scsi_flush_vq(vs, i); + vhost_work_flush(&vs->dev, &vs->vs_completion_work); +} 
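The tcm_vhost hunks above drop the fixed vs_tpg[] array and the vs_endpoint flag in favour of an RCU-published vs_tpg pointer, with vhost_scsi_flush() standing in for synchronize_rcu(). Below is a minimal sketch of that publish/flush/free pattern; struct config, publish_config() and the flush_all_readers() callback are illustrative names, not the driver's.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct config;                          /* opaque payload, e.g. a target table */

static struct config __rcu *active_cfg;

/* Publish a new config, wait out readers of the old one, then free it. */
static void publish_config(struct config *newcfg,
                           void (*flush_all_readers)(void))
{
        struct config *old;

        old = rcu_dereference_protected(active_cfg, 1); /* update side is serialized */
        rcu_assign_pointer(active_cfg, newcfg);         /* readers now see newcfg */
        flush_all_readers();    /* stands in for vhost_scsi_flush(): returns only
                                 * after every in-flight handler has finished */
        kfree(old);             /* safe: no reader can still hold the old pointer */
}

The reason the flush must precede kfree() is that the vhost worker itself acts as the read-side critical section, so once the flush returns no request handler can still be dereferencing the old table.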
+ /* * Called from vhost_scsi_ioctl() context to walk the list of available * tcm_vhost_tpg with an active struct tcm_vhost_nexus @@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint( { struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tv_tpg; + struct tcm_vhost_tpg **vs_tpg; + struct vhost_virtqueue *vq; + int index, ret, i, len; bool match = false; - int index, ret; mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. */ @@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint( } } + len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; + vs_tpg = kzalloc(len, GFP_KERNEL); + if (!vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return -ENOMEM; + } + if (vs->vs_tpg) + memcpy(vs_tpg, vs->vs_tpg, len); + mutex_lock(&tcm_vhost_mutex); list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { mutex_lock(&tv_tpg->tv_tpg_mutex); @@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint( tv_tport = tv_tpg->tport; if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { - if (vs->vs_tpg[tv_tpg->tport_tpgt]) { + if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) { mutex_unlock(&tv_tpg->tv_tpg_mutex); mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vs->dev.mutex); + kfree(vs_tpg); return -EEXIST; } tv_tpg->tv_tpg_vhost_count++; - vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; + vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; smp_mb__after_atomic_inc(); match = true; } @@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint( if (match) { memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, sizeof(vs->vs_vhost_wwpn)); - vs->vs_endpoint = true; + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { + vq = &vs->vqs[i]; + /* Flushing the vhost_work acts as synchronize_rcu */ + mutex_lock(&vq->mutex); + rcu_assign_pointer(vq->private_data, vs_tpg); + vhost_init_used(vq); + mutex_unlock(&vq->mutex); + } ret = 0; } else { ret = -EEXIST; } + /* + * Act as synchronize_rcu to make sure access to + * old vs->vs_tpg is finished. + */ + vhost_scsi_flush(vs); + kfree(vs->vs_tpg); + vs->vs_tpg = vs_tpg; + mutex_unlock(&vs->dev.mutex); return ret; } @@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint( { struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tv_tpg; + struct vhost_virtqueue *vq; + bool match = false; int index, ret, i; u8 target; @@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint( goto err_dev; } } + + if (!vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return 0; + } + for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { target = i; - tv_tpg = vs->vs_tpg[target]; if (!tv_tpg) continue; @@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint( } tv_tpg->tv_tpg_vhost_count--; vs->vs_tpg[target] = NULL; - vs->vs_endpoint = false; + match = true; mutex_unlock(&tv_tpg->tv_tpg_mutex); } + if (match) { + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { + vq = &vs->vqs[i]; + /* Flushing the vhost_work acts as synchronize_rcu */ + mutex_lock(&vq->mutex); + rcu_assign_pointer(vq->private_data, NULL); + mutex_unlock(&vq->mutex); + } + } + /* + * Act as synchronize_rcu to make sure access to + * old vs->vs_tpg is finished. 
+ */ + vhost_scsi_flush(vs); + kfree(vs->vs_tpg); + vs->vs_tpg = NULL; mutex_unlock(&vs->dev.mutex); + return 0; err_tpg: @@ -899,6 +975,24 @@ err_dev: return ret; } +static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) +{ + if (features & ~VHOST_SCSI_FEATURES) + return -EOPNOTSUPP; + + mutex_lock(&vs->dev.mutex); + if ((features & (1 << VHOST_F_LOG_ALL)) && + !vhost_log_access_ok(&vs->dev)) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + vs->dev.acked_features = features; + smp_wmb(); + vhost_scsi_flush(vs); + mutex_unlock(&vs->dev.mutex); + return 0; +} + static int vhost_scsi_open(struct inode *inode, struct file *f) { struct vhost_scsi *s; @@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f) return 0; } -static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) -{ - vhost_poll_flush(&vs->dev.vqs[index].poll); -} - -static void vhost_scsi_flush(struct vhost_scsi *vs) -{ - int i; - - for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) - vhost_scsi_flush_vq(vs, i); - vhost_work_flush(&vs->dev, &vs->vs_completion_work); -} - -static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) -{ - if (features & ~VHOST_SCSI_FEATURES) - return -EOPNOTSUPP; - - mutex_lock(&vs->dev.mutex); - if ((features & (1 << VHOST_F_LOG_ALL)) && - !vhost_log_access_ok(&vs->dev)) { - mutex_unlock(&vs->dev.mutex); - return -EFAULT; - } - vs->dev.acked_features = features; - smp_wmb(); - vhost_scsi_flush(vs); - mutex_unlock(&vs->dev.mutex); - return 0; -} - static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 7c254084b6a0..86291dcd964a 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) { struct fb_info *info = file_fb_info(file); struct fb_ops *fb; - unsigned long off; + unsigned long mmio_pgoff; unsigned long start; u32 len; if (!info) return -ENODEV; - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) - return -EINVAL; - off = vma->vm_pgoff << PAGE_SHIFT; fb = info->fbops; if (!fb) return -ENODEV; @@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) return res; } - /* frame buffer memory */ + /* + * Ugh. This can be either the frame buffer mapping, or + * if pgoff points past it, the mmio mapping. 
+ */ start = info->fix.smem_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); - if (off >= len) { - /* memory mapped io */ - off -= len; - if (info->var.accel_flags) { - mutex_unlock(&info->mm_lock); - return -EINVAL; - } + len = info->fix.smem_len; + mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; + if (vma->vm_pgoff >= mmio_pgoff) { + vma->vm_pgoff -= mmio_pgoff; start = info->fix.mmio_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); + len = info->fix.mmio_len; } mutex_unlock(&info->mm_lock); - start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) - return -EINVAL; - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - fb_pgprotect(file, vma, off); - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - return 0; + fb_pgprotect(file, vma, start); + + return vm_iomap_memory(vma, start, len); } static int diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 94ad0f71383c..7f6709991a5c 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c @@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm, fbmode->vmode = 0; if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; - if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) + if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) fbmode->vmode |= FB_VMODE_INTERLACED; diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c index 9ed83419038b..84de2632857a 100644 --- a/drivers/video/mmp/core.c +++ b/drivers/video/mmp/core.c @@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path) kfree(path); mutex_unlock(&disp_lock); - - dev_info(path->dev, "de-register %s\n", path->name); } EXPORT_SYMBOL_GPL(mmp_unregister_path); diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 63203acef812..0264704a52be 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); lcdc_write_chan(ch, LDHAJR, tmp); + lcdc_write_chan_mirror(ch, LDHAJR, tmp); } static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index b75db0186488..d4284458377e 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -1973,7 +1973,8 @@ static int uvesafb_init(void) err = -ENOMEM; if (err) { - platform_device_put(uvesafb_device); + if (uvesafb_device) + platform_device_put(uvesafb_device); platform_driver_unregister(&uvesafb_driver); cn_del_callback(&uvesafb_cn_id); return err; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 9fcc70c11cea..e89fc3133972 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG config AT91RM9200_WATCHDOG tristate "AT91RM9200 watchdog" - depends on ARCH_AT91 + depends on ARCH_AT91RM9200 help Watchdog timer embedded into AT91RM9200 chips. This will reboot your system when the timeout is reached. 
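A few hunks above, fb_mmap() is reworked to drop the hand-rolled offset arithmetic and the direct io_remap_pfn_range() call: the page offset now only selects between framebuffer memory and the MMIO window, and vm_iomap_memory() does the bounds checking and the mapping. A minimal sketch of that shape follows; mydrv_mmap() and the mem_*/mmio_* placeholders are illustrative, not fbdev core code.

#include <linux/fs.h>
#include <linux/mm.h>

/* Placeholder region description; a real driver would take these from its
 * device info (fix.smem_start/len and fix.mmio_start/len in fbdev terms). */
static unsigned long mem_start, mmio_start;
static u32 mem_len, mmio_len;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long start = mem_start;
        u32 len = mem_len;
        unsigned long mmio_pgoff =
                PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;

        if (vma->vm_pgoff >= mmio_pgoff) {      /* offset past the fb: map MMIO */
                vma->vm_pgoff -= mmio_pgoff;
                start = mmio_start;
                len = mmio_len;
        }

        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        /* vm_iomap_memory() rejects mappings that run past 'len' and performs
         * the remap itself, so no open-coded length checks remain. */
        return vm_iomap_memory(vma, start, len);
}

Delegating the length check to vm_iomap_memory() is the point of the rework: it removes the class of off-by-one and overflow checks the old open-coded version had to carry.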
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index aa85881d17b2..2647ad8e1f19 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void) { int start_word_idx, start_bit_idx; int word_idx, bit_idx; - int i; + int i, irq; int cpu = get_cpu(); struct shared_info *s = HYPERVISOR_shared_info; struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); @@ -1324,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void) do { xen_ulong_t pending_words; + xen_ulong_t pending_bits; + struct irq_desc *desc; vcpu_info->evtchn_upcall_pending = 0; @@ -1335,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void) * selector flag. xchg_xen_ulong must contain an * appropriate barrier. */ + if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { + int evtchn = evtchn_from_irq(irq); + word_idx = evtchn / BITS_PER_LONG; + pending_bits = evtchn % BITS_PER_LONG; + if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { + desc = irq_to_desc(irq); + if (desc) + generic_handle_irq_desc(irq, desc); + } + } + pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); start_word_idx = __this_cpu_read(current_word_idx); @@ -1343,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void) word_idx = start_word_idx; for (i = 0; pending_words != 0; i++) { - xen_ulong_t pending_bits; xen_ulong_t words; words = MASK_LSBS(pending_words, word_idx); @@ -1372,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void) do { xen_ulong_t bits; - int port, irq; - struct irq_desc *desc; + int port; bits = MASK_LSBS(pending_bits, bit_idx); |
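The events.c hunk above makes __xen_evtchn_do_upcall() test the per-CPU VIRQ_TIMER event channel before the round-robin scan of the pending-word bitmap, so a flood of other events cannot starve timer interrupts. Below is a generic sketch of that "service the latency-critical source first" pattern; NR_CHANNELS, service_pending() and the dispatch callback are illustrative names, not Xen's code.

#include <linux/bitops.h>
#include <linux/types.h>

#define NR_CHANNELS 64

static DECLARE_BITMAP(pending, NR_CHANNELS);    /* one pending bit per channel */
static int last_serviced;                       /* round-robin cursor */

static void service_pending(int timer_chn, void (*dispatch)(int chn))
{
        int i;

        /* Latency-critical channel is checked first, outside the fair scan. */
        if (test_and_clear_bit(timer_chn, pending))
                dispatch(timer_chn);

        /* Then resume the round-robin scan after the last serviced channel. */
        for (i = 1; i <= NR_CHANNELS; i++) {
                int chn = (last_serviced + i) % NR_CHANNELS;

                if (test_and_clear_bit(chn, pending)) {
                        dispatch(chn);
                        last_serviced = chn;
                }
        }
}

The cursor-based scan mirrors the start_word_idx/start_bit_idx fairness scheme in the real handler, but in a deliberately simplified single-level form.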