Diffstat (limited to 'drivers'): 151 files changed, 5712 insertions, 746 deletions
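Among the changes below, drivers/acpi/dock.c drops the per-station hotplug_devices list in favour of reference-counted hotplug contexts attached to each dependent device, and register_hotplug_dock_device() gains optional init and release callbacks (the libata-acpi.c hunk further down is adapted to the new signature). The caller-side sketch that follows is only an orientation aid and is not part of the patch; the my_* names are hypothetical, and only the signatures visible in the diff are assumed.

#include <linux/slab.h>
#include <linux/acpi.h>          /* acpi_handle */
#include <acpi/acpi_drivers.h>   /* struct acpi_dock_ops, register_hotplug_dock_device() */

/* Hypothetical per-device state handed to the dock layer as @context. */
struct my_dock_ctx {
	acpi_handle handle;
};

static void my_dock_handler(acpi_handle handle, u32 event, void *data)
{
	/* Called after _DCK for dock/undock events; @data is the context
	 * passed at registration time. */
}

static void my_dock_uevent(acpi_handle handle, u32 event, void *data)
{
	/* Called when the dock station generates a uevent. */
}

static const struct acpi_dock_ops my_dock_ops = {
	.handler = my_dock_handler,
	.uevent  = my_dock_uevent,
};

static void my_dock_release(void *data)
{
	kfree(data);	/* Runs once the last reference to the context is dropped. */
}

static int my_register(acpi_handle handle)
{
	struct my_dock_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->handle = handle;

	/* New signature: optional init and release callbacks follow @context. */
	return register_hotplug_dock_device(handle, &my_dock_ops, ctx,
					    NULL, my_dock_release);
}

On unregister_hotplug_dock_device(), the dock core drops its reference and invokes the release callback once the context is no longer in use, which is what lets callers like the sketch above free @context safely.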
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 652fd5ce303c..cab13f2fc28e 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev, if (dev_desc->clk_required) { ret = register_device_clock(adev, pdata); if (ret) { - /* - * Skip the device, but don't terminate the namespace - * scan. - */ - kfree(pdata); - return 0; + /* Skip the device, but continue the namespace scan. */ + ret = 0; + goto err_out; } } + /* + * This works around a known issue in ACPI tables where LPSS devices + * have _PS0 and _PS3 without _PSC (and no power resources), so + * acpi_bus_init_power() will assume that the BIOS has put them into D0. + */ + ret = acpi_device_fix_up_power(adev); + if (ret) { + /* Skip the device, but continue the namespace scan. */ + ret = 0; + goto err_out; + } + adev->driver_data = pdata; ret = acpi_create_platform_device(adev, id); if (ret > 0) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 318fa32a141e..31c217a42839 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -290,6 +290,26 @@ int acpi_bus_init_power(struct acpi_device *device) return 0; } +/** + * acpi_device_fix_up_power - Force device with missing _PSC into D0. + * @device: Device object whose power state is to be fixed up. + * + * Devices without power resources and _PSC, but having _PS0 and _PS3 defined, + * are assumed to be put into D0 by the BIOS. However, in some cases that may + * not be the case and this function should be used then. + */ +int acpi_device_fix_up_power(struct acpi_device *device) +{ + int ret = 0; + + if (!device->power.flags.power_resources + && !device->power.flags.explicit_get + && device->power.state == ACPI_STATE_D0) + ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); + + return ret; +} + int acpi_bus_update_power(acpi_handle handle, int *state_p) { struct acpi_device *device; diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 4fdea381ef21..14de9f46972e 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -66,20 +66,21 @@ struct dock_station { spinlock_t dd_lock; struct mutex hp_lock; struct list_head dependent_devices; - struct list_head hotplug_devices; struct list_head sibling; struct platform_device *dock_device; }; static LIST_HEAD(dock_stations); static int dock_station_count; +static DEFINE_MUTEX(hotplug_lock); struct dock_dependent_device { struct list_head list; - struct list_head hotplug_list; acpi_handle handle; - const struct acpi_dock_ops *ops; - void *context; + const struct acpi_dock_ops *hp_ops; + void *hp_context; + unsigned int hp_refcount; + void (*hp_release)(void *); }; #define DOCK_DOCKING 0x00000001 @@ -111,7 +112,6 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle) dd->handle = handle; INIT_LIST_HEAD(&dd->list); - INIT_LIST_HEAD(&dd->hotplug_list); spin_lock(&ds->dd_lock); list_add_tail(&dd->list, &ds->dependent_devices); @@ -121,35 +121,90 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle) } /** - * dock_add_hotplug_device - associate a hotplug handler with the dock station - * @ds: The dock station - * @dd: The dependent device struct - * - * Add the dependent device to the dock's hotplug device list + * dock_init_hotplug - Initialize a hotplug device on a docking station. + * @dd: Dock-dependent device. + * @ops: Dock operations to attach to the dependent device. + * @context: Data to pass to the @ops callbacks and @release. 
+ * @init: Optional initialization routine to run after setting up context. + * @release: Optional release routine to run on removal. */ -static void -dock_add_hotplug_device(struct dock_station *ds, - struct dock_dependent_device *dd) +static int dock_init_hotplug(struct dock_dependent_device *dd, + const struct acpi_dock_ops *ops, void *context, + void (*init)(void *), void (*release)(void *)) { - mutex_lock(&ds->hp_lock); - list_add_tail(&dd->hotplug_list, &ds->hotplug_devices); - mutex_unlock(&ds->hp_lock); + int ret = 0; + + mutex_lock(&hotplug_lock); + + if (dd->hp_context) { + ret = -EEXIST; + } else { + dd->hp_refcount = 1; + dd->hp_ops = ops; + dd->hp_context = context; + dd->hp_release = release; + } + + if (!WARN_ON(ret) && init) + init(context); + + mutex_unlock(&hotplug_lock); + return ret; } /** - * dock_del_hotplug_device - remove a hotplug handler from the dock station - * @ds: The dock station - * @dd: the dependent device struct + * dock_release_hotplug - Decrement hotplug reference counter of dock device. + * @dd: Dock-dependent device. * - * Delete the dependent device from the dock's hotplug device list + * Decrement the reference counter of @dd and if 0, detach its hotplug + * operations from it, reset its context pointer and run the optional release + * routine if present. */ -static void -dock_del_hotplug_device(struct dock_station *ds, - struct dock_dependent_device *dd) +static void dock_release_hotplug(struct dock_dependent_device *dd) { - mutex_lock(&ds->hp_lock); - list_del(&dd->hotplug_list); - mutex_unlock(&ds->hp_lock); + void (*release)(void *) = NULL; + void *context = NULL; + + mutex_lock(&hotplug_lock); + + if (dd->hp_context && !--dd->hp_refcount) { + dd->hp_ops = NULL; + context = dd->hp_context; + dd->hp_context = NULL; + release = dd->hp_release; + dd->hp_release = NULL; + } + + if (release && context) + release(context); + + mutex_unlock(&hotplug_lock); +} + +static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event, + bool uevent) +{ + acpi_notify_handler cb = NULL; + bool run = false; + + mutex_lock(&hotplug_lock); + + if (dd->hp_context) { + run = true; + dd->hp_refcount++; + if (dd->hp_ops) + cb = uevent ? 
dd->hp_ops->uevent : dd->hp_ops->handler; + } + + mutex_unlock(&hotplug_lock); + + if (!run) + return; + + if (cb) + cb(dd->handle, event, dd->hp_context); + + dock_release_hotplug(dd); } /** @@ -360,9 +415,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event) /* * First call driver specific hotplug functions */ - list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) - if (dd->ops && dd->ops->handler) - dd->ops->handler(dd->handle, event, dd->context); + list_for_each_entry(dd, &ds->dependent_devices, list) + dock_hotplug_event(dd, event, false); /* * Now make sure that an acpi_device is created for each @@ -398,9 +452,8 @@ static void dock_event(struct dock_station *ds, u32 event, int num) if (num == DOCK_EVENT) kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); - list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) - if (dd->ops && dd->ops->uevent) - dd->ops->uevent(dd->handle, event, dd->context); + list_for_each_entry(dd, &ds->dependent_devices, list) + dock_hotplug_event(dd, event, true); if (num != DOCK_EVENT) kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); @@ -570,19 +623,24 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier); * @handle: the handle of the device * @ops: handlers to call after docking * @context: device specific data + * @init: Optional initialization routine to run after registration + * @release: Optional release routine to run on unregistration * * If a driver would like to perform a hotplug operation after a dock * event, they can register an acpi_notifiy_handler to be called by * the dock driver after _DCK is executed. */ -int -register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, - void *context) +int register_hotplug_dock_device(acpi_handle handle, + const struct acpi_dock_ops *ops, void *context, + void (*init)(void *), void (*release)(void *)) { struct dock_dependent_device *dd; struct dock_station *dock_station; int ret = -EINVAL; + if (WARN_ON(!context)) + return -EINVAL; + if (!dock_station_count) return -ENODEV; @@ -597,12 +655,8 @@ register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops * ops */ dd = find_dock_dependent_device(dock_station, handle); - if (dd) { - dd->ops = ops; - dd->context = context; - dock_add_hotplug_device(dock_station, dd); + if (dd && !dock_init_hotplug(dd, ops, context, init, release)) ret = 0; - } } return ret; @@ -624,7 +678,7 @@ void unregister_hotplug_dock_device(acpi_handle handle) list_for_each_entry(dock_station, &dock_stations, sibling) { dd = find_dock_dependent_device(dock_station, handle); if (dd) - dock_del_hotplug_device(dock_station, dd); + dock_release_hotplug(dd); } } EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device); @@ -868,8 +922,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr, if (!count) return -EINVAL; + acpi_scan_lock_acquire(); begin_undock(dock_station); ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); + acpi_scan_lock_release(); return ret ? 
ret: count; } static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock); @@ -951,7 +1007,6 @@ static int __init dock_add(acpi_handle handle) mutex_init(&dock_station->hp_lock); spin_lock_init(&dock_station->dd_lock); INIT_LIST_HEAD(&dock_station->sibling); - INIT_LIST_HEAD(&dock_station->hotplug_devices); ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); INIT_LIST_HEAD(&dock_station->dependent_devices); @@ -992,30 +1047,6 @@ err_unregister: } /** - * dock_remove - free up resources related to the dock station - */ -static int dock_remove(struct dock_station *ds) -{ - struct dock_dependent_device *dd, *tmp; - struct platform_device *dock_device = ds->dock_device; - - if (!dock_station_count) - return 0; - - /* remove dependent devices */ - list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list) - kfree(dd); - - list_del(&ds->sibling); - - /* cleanup sysfs */ - sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group); - platform_device_unregister(dock_device); - - return 0; -} - -/** * find_dock_and_bay - look for dock stations and bays * @handle: acpi handle of a device * @lvl: unused @@ -1033,7 +1064,7 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; } -static int __init dock_init(void) +int __init acpi_dock_init(void) { if (acpi_disabled) return 0; @@ -1052,19 +1083,3 @@ static int __init dock_init(void) ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); return 0; } - -static void __exit dock_exit(void) -{ - struct dock_station *tmp, *dock_station; - - unregister_acpi_bus_notifier(&dock_acpi_notifier); - list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling) - dock_remove(dock_station); -} - -/* - * Must be called before drivers of devices in dock, otherwise we can't know - * which devices are in a dock - */ -subsys_initcall(dock_init); -module_exit(dock_exit); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 297cbf456f86..c610a76d92c4 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -40,6 +40,11 @@ void acpi_container_init(void); #else static inline void acpi_container_init(void) {} #endif +#ifdef CONFIG_ACPI_DOCK +void acpi_dock_init(void); +#else +static inline void acpi_dock_init(void) {} +#endif #ifdef CONFIG_ACPI_HOTPLUG_MEMORY void acpi_memory_hotplug_init(void); #else diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index f962047c6c85..288bb270f8ed 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle) ACPI_STA_DEFAULT); mutex_init(&resource->resource_lock); INIT_LIST_HEAD(&resource->dependent); + INIT_LIST_HEAD(&resource->list_node); resource->name = device->pnp.bus_id; strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_CLASS); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index a3868f6c222a..3322b47ab7ca 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) } static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, - u8 triggering, u8 polarity, u8 shareable) + u8 triggering, u8 polarity, u8 shareable, + bool legacy) { int irq, p, t; @@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, * In IO-APIC mode, use overrided attribute. Two reasons: * 1. BIOS bug in DSDT * 2. 
BIOS uses IO-APIC mode Interrupt Source Override + * + * We do this only if we are dealing with IRQ() or IRQNoFlags() + * resource (the legacy ISA resources). With modern ACPI 5 devices + * using extended IRQ descriptors we take the IRQ configuration + * from _CRS directly. */ - if (!acpi_get_override_irq(gsi, &t, &p)) { + if (legacy && !acpi_get_override_irq(gsi, &t, &p)) { u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; if (triggering != trig || polarity != pol) { pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, - t ? "edge" : "level", p ? "low" : "high"); + t ? "level" : "edge", p ? "low" : "high"); triggering = trig; polarity = pol; } @@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, irq->interrupts[index], irq->triggering, irq->polarity, - irq->sharable); + irq->sharable, true); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ext_irq = &ares->data.extended_irq; @@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, ext_irq->interrupts[index], ext_irq->triggering, ext_irq->polarity, - ext_irq->sharable); + ext_irq->sharable, false); break; default: return false; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index b14ac46948c9..27da63061e11 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2042,6 +2042,7 @@ int __init acpi_scan_init(void) acpi_lpss_init(); acpi_container_init(); acpi_memory_hotplug_init(); + acpi_dock_init(); mutex_lock(&acpi_scan_lock); /* diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 87f2f395d79a..cf4e7020adac 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -156,8 +156,10 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); - if (wait) + if (wait) { ata_port_wait_eh(ap); + flush_work(&ap->hotplug_task.work); + } } static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) @@ -214,6 +216,39 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { .uevent = ata_acpi_ap_uevent, }; +void ata_acpi_hotplug_init(struct ata_host *host) +{ + int i; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + acpi_handle handle; + struct ata_device *dev; + + if (!ap) + continue; + + handle = ata_ap_acpi_handle(ap); + if (handle) { + /* we might be on a docking station */ + register_hotplug_dock_device(handle, + &ata_acpi_ap_dock_ops, ap, + NULL, NULL); + } + + ata_for_each_dev(dev, &ap->link, ALL) { + handle = ata_dev_acpi_handle(dev); + if (!handle) + continue; + + /* we might be on a docking station */ + register_hotplug_dock_device(handle, + &ata_acpi_dev_dock_ops, + dev, NULL, NULL); + } + } +} + /** * ata_acpi_dissociate - dissociate ATA host from ACPI objects * @host: target ATA host diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index f2184276539d..adf002a3c584 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6148,6 +6148,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) if (rc) goto err_tadd; + ata_acpi_hotplug_init(host); + /* set cable, sata_spd_limit and report */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index c949dd311b2e..577d902bc4de 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -122,6 
+122,7 @@ extern int ata_acpi_register(void); extern void ata_acpi_unregister(void); extern void ata_acpi_bind(struct ata_device *dev); extern void ata_acpi_unbind(struct ata_device *dev); +extern void ata_acpi_hotplug_init(struct ata_host *host); #else static inline void ata_acpi_dissociate(struct ata_host *host) { } static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } @@ -134,6 +135,7 @@ static inline int ata_acpi_register(void) { return 0; } static inline void ata_acpi_unregister(void) { } static inline void ata_acpi_bind(struct ata_device *dev) { } static inline void ata_acpi_unbind(struct ata_device *dev) { } +static inline void ata_acpi_hotplug_init(struct ata_host *host) {} #endif /* libata-scsi.c */ diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 4b1f9265887f..01e21037d8fe 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv) { struct firmware_buf *buf = fw_priv->buf; + /* + * There is a small window in which user can write to 'loading' + * between loading done and disappearance of 'loading' + */ + if (test_bit(FW_STATUS_DONE, &buf->status)) + return; + set_bit(FW_STATUS_ABORT, &buf->status); complete_all(&buf->completion); + + /* avoid user action after loading abort */ + fw_priv->buf = NULL; } #define is_fw_load_aborted(buf) \ @@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev, struct device_attribute *attr, char *buf) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); + int loading = 0; + + mutex_lock(&fw_lock); + if (fw_priv->buf) + loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); + mutex_unlock(&fw_lock); return sprintf(buf, "%d\n", loading); } @@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev, const char *buf, size_t count) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - struct firmware_buf *fw_buf = fw_priv->buf; + struct firmware_buf *fw_buf; int loading = simple_strtol(buf, NULL, 10); int i; mutex_lock(&fw_lock); - + fw_buf = fw_priv->buf; if (!fw_buf) goto out; @@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work) struct firmware_priv, timeout_work.work); mutex_lock(&fw_lock); - if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) { - mutex_unlock(&fw_lock); - return; - } fw_load_abort(fw_priv); mutex_unlock(&fw_lock); } @@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, cancel_delayed_work_sync(&fw_priv->timeout_work); - fw_priv->buf = NULL; - device_remove_file(f_dev, &dev_attr_loading); err_del_bin_attr: device_remove_bin_file(f_dev, &firmware_attr_data); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3063452e55da..aff789d6fccd 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1036,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) char *name; u64 segment; int ret; + char *name_format; name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); if (!name) return NULL; segment = offset >> rbd_dev->header.obj_order; - ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx", + name_format = "%s.%012llx"; + if (rbd_dev->image_format == 2) + name_format = "%s.%016llx"; + ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format, rbd_dev->header.object_prefix, segment); if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) { pr_err("error 
formatting segment name for #%llu (%d)\n", @@ -2248,13 +2252,17 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, obj_request->pages, length, offset & ~PAGE_MASK, false, false); + /* + * set obj_request->img_request before formatting + * the osd_request so that it gets the right snapc + */ + rbd_img_obj_request_add(img_request, obj_request); if (write_request) rbd_osd_req_format_write(obj_request); else rbd_osd_req_format_read(obj_request); obj_request->img_offset = img_offset; - rbd_img_obj_request_add(img_request, obj_request); img_offset += length; resid -= length; @@ -4239,6 +4247,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) down_write(&rbd_dev->header_rwsem); + ret = rbd_dev_v2_image_size(rbd_dev); + if (ret) + goto out; + if (first_time) { ret = rbd_dev_v2_header_onetime(rbd_dev); if (ret) @@ -4272,10 +4284,6 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) "is EXPERIMENTAL!"); } - ret = rbd_dev_v2_image_size(rbd_dev); - if (ret) - goto out; - if (rbd_dev->spec->snap_id == CEPH_NOSNAP) if (rbd_dev->mapping.size != rbd_dev->header.image_size) rbd_dev->mapping.size = rbd_dev->header.image_size; diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 3a4343b3bd6d..9a9f51875df5 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data) add_wait_queue(&thread->wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + BT_DBG("main_thread: break from main thread"); + break; + } if (adapter->wakeup_tries || ((!adapter->int_count) && @@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data) BT_DBG("main_thread woke up"); - if (kthread_should_stop()) { - BT_DBG("main_thread: break from main thread"); - break; - } - spin_lock_irqsave(&priv->driver_lock, flags); if (adapter->int_count) { adapter->int_count = 0; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 934cfd18f72d..1144e8c7579d 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1955,6 +1955,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) /* XXX the notifier code should handle this better */ if (!cn->notifier_head.head) { srcu_cleanup_notifier_head(&cn->notifier_head); + list_del(&cn->node); kfree(cn); } diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index 5c97e75924a8..22d7699e7ced 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = { /* list of all parent clock list */ PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; -PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", }; +PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", }; PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" }; PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" }; PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" }; @@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = { }; struct samsung_mux_clock exynos5250_mux_clks[] __initdata = { - MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1), - MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1), + MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"), + MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"), MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1), - MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1), 
+ MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"), MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1), MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1), MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1), @@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), - GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0), + GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0), diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 89135f6be116..362f12dcd944 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -111,7 +111,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw); - u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1; + u32 mdiv, pdiv, sdiv, pll_con0, pll_con1; + s16 kdiv; u64 fvco = parent_rate; pll_con0 = __raw_readl(pll->con_reg); @@ -119,7 +120,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw, mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK; pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK; sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK; - kdiv = pll_con1 & PLL36XX_KDIV_MASK; + kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK); fvco *= (mdiv << 16) + kdiv; do_div(fvco, (pdiv << sdiv)); diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index f9ec43fd1320..080c3c5e33f6 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c @@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base) clk_register_clkdev(clk, NULL, "60100000.serial"); } #else -static inline void spear320_clk_init(void) { } +static inline void spear320_clk_init(void __iomem *soc_config_base) { } #endif void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index c6921f538e28..ba99e3844106 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -1598,6 +1598,12 @@ static void __init tegra30_periph_clk_init(void) clk_register_clkdev(clk, "afi", "tegra-pcie"); clks[afi] = clk; + /* pciex */ + clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0, + 74, &periph_u_regs, periph_clk_enb_refcnt); + clk_register_clkdev(clk, "pciex", "tegra-pcie"); + clks[pciex] = clk; + /* kfuse */ clk = tegra_clk_register_periph_gate("kfuse", "clk_m", TEGRA_PERIPH_ON_APB, @@ -1716,11 +1722,6 @@ static void __init tegra30_fixed_clk_init(void) 1, 0, &cml_lock); clk_register_clkdev(clk, "cml1", NULL); clks[cml1] = clk; - - /* pciex */ - clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000); - clk_register_clkdev(clk, "pciex", NULL); - clks[pciex] = clk; } static void __init tegra30_osc_clk_init(void) diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 4b9bb5def6f1..93eb5cbcc1f6 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -47,6 +47,8 @@ static struct od_ops od_ops; static struct 
cpufreq_governor cpufreq_gov_ondemand; #endif +static unsigned int default_powersave_bias; + static void ondemand_powersave_bias_init_cpu(int cpu) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); @@ -543,7 +545,7 @@ static int od_init(struct dbs_data *dbs_data) tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; tuners->ignore_nice = 0; - tuners->powersave_bias = 0; + tuners->powersave_bias = default_powersave_bias; tuners->io_is_busy = should_io_be_busy(); dbs_data->tuners = tuners; @@ -585,6 +587,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias) unsigned int cpu; cpumask_t done; + default_powersave_bias = powersave_bias; cpumask_clear(&done); get_online_cpus(); @@ -593,11 +596,17 @@ static void od_set_powersave_bias(unsigned int powersave_bias) continue; policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; - dbs_data = policy->governor_data; - od_tuners = dbs_data->tuners; - od_tuners->powersave_bias = powersave_bias; + if (!policy) + continue; cpumask_or(&done, &done, policy->cpus); + + if (policy->governor != &cpufreq_gov_ondemand) + continue; + + dbs_data = policy->governor_data; + od_tuners = dbs_data->tuners; + od_tuners->powersave_bias = default_powersave_bias; } put_online_cpus(); } diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index d3f7d2db870f..4a430360af5a 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -1094,6 +1094,9 @@ static int omap_gpio_probe(struct platform_device *pdev) const struct omap_gpio_platform_data *pdata; struct resource *res; struct gpio_bank *bank; +#ifdef CONFIG_ARCH_OMAP1 + int irq_base; +#endif match = of_match_device(of_match_ptr(omap_gpio_match), dev); @@ -1135,11 +1138,28 @@ static int omap_gpio_probe(struct platform_device *pdev) pdata->get_context_loss_count; } +#ifdef CONFIG_ARCH_OMAP1 + /* + * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop + * irq_alloc_descs() and irq_domain_add_legacy() and just use a + * linear IRQ domain mapping for all OMAP platforms. 
+ */ + irq_base = irq_alloc_descs(-1, 0, bank->width, 0); + if (irq_base < 0) { + dev_err(dev, "Couldn't allocate IRQ numbers\n"); + return -ENODEV; + } + bank->domain = irq_domain_add_legacy(node, bank->width, irq_base, + 0, &irq_domain_simple_ops, NULL); +#else bank->domain = irq_domain_add_linear(node, bank->width, &irq_domain_simple_ops, NULL); - if (!bank->domain) +#endif + if (!bank->domain) { + dev_err(dev, "Couldn't register an IRQ domain\n"); return -ENODEV; + } if (bank->regs->set_dataout && bank->regs->clr_dataout) bank->set_dataout = _set_gpio_dataout_reg; diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index dcde35231e25..5b7b9110254b 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, if (ret) return ERR_PTR(ret); } - return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, - 0600); + return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); } EXPORT_SYMBOL(drm_gem_prime_export); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b9d00dcf9a2d..9669a0b8b440 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1697,6 +1697,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); +void i915_gem_restore_fences(struct drm_device *dev); + /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 970ad17c99ab..9e35dafc5807 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1801,7 +1801,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; gfp &= ~(__GFP_IO | __GFP_WAIT); } - +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + st->nents++; + sg_set_page(sg, page, PAGE_SIZE, 0); + sg = sg_next(sg); + continue; + } +#endif if (!i || page_to_pfn(page) != last_pfn + 1) { if (i) sg = sg_next(sg); @@ -1812,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) } last_pfn = page_to_pfn(page); } - - sg_mark_end(sg); +#ifdef CONFIG_SWIOTLB + if (!swiotlb_nr_tbl()) +#endif + sg_mark_end(sg); obj->pages = st; if (i915_gem_object_needs_bit17_swizzle(obj)) @@ -2117,25 +2126,15 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, } } -static void i915_gem_reset_fences(struct drm_device *dev) +void i915_gem_restore_fences(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - - if (reg->obj) - i915_gem_object_fence_lost(reg->obj); - - i915_gem_write_fence(dev, i, NULL); - - reg->pin_count = 0; - reg->obj = NULL; - INIT_LIST_HEAD(®->lru_list); + i915_gem_write_fence(dev, i, reg->obj); } - - INIT_LIST_HEAD(&dev_priv->mm.fence_list); } void i915_gem_reset(struct drm_device *dev) @@ -2158,8 +2157,7 @@ void i915_gem_reset(struct drm_device *dev) obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } - /* The fence registers are invalidated so clear them out */ - i915_gem_reset_fences(dev); + i915_gem_restore_fences(dev); } /** @@ -3865,8 +3863,6 @@ i915_gem_idle(struct drm_device *dev) if (!drm_core_check_feature(dev, 
DRIVER_MODESET)) i915_gem_evict_everything(dev); - i915_gem_reset_fences(dev); - /* Hack! Don't let anybody do execbuf while we don't control the chip. * We need to replace this with a semaphore, or something. * And not confound mm.suspended! @@ -4193,7 +4189,8 @@ i915_gem_load(struct drm_device *dev) dev_priv->num_fence_regs = 8; /* Initialize fence registers to zero */ - i915_gem_reset_fences(dev); + INIT_LIST_HEAD(&dev_priv->mm.fence_list); + i915_gem_restore_fences(dev); i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 41f0fdecfbdc..369b3d8776ab 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -384,6 +384,7 @@ int i915_restore_state(struct drm_device *dev) mutex_lock(&dev->struct_mutex); + i915_gem_restore_fences(dev); i915_restore_display(dev); if (!drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index a4b71b25fa53..a30f29425c21 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -171,6 +171,11 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) return -EINVAL; + if (!access_ok(VERIFY_READ, + (void *)(unsigned long)user_cmd.command, + user_cmd.command_size)) + return -EFAULT; + ret = qxl_alloc_release_reserved(qdev, sizeof(union qxl_release_info) + user_cmd.command_size, diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0e5341695922..6948eb88c2b7 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev) int r600_uvd_init(struct radeon_device *rdev) { int i, j, r; + /* disable byte swapping */ + u32 lmi_swap_cntl = 0; + u32 mp_swap_cntl = 0; /* raise clocks while booting up the VCPU */ radeon_set_uvd_clocks(rdev, 53300, 40000); @@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev) WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | (1 << 21) | (1 << 9) | (1 << 20)); - /* disable byte swapping */ - WREG32(UVD_LMI_SWAP_CNTL, 0); - WREG32(UVD_MP_SWAP_CNTL, 0); +#ifdef __BIG_ENDIAN + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; +#endif + WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl); WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); WREG32(UVD_MPC_SET_MUXA1, 0x0); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 189973836cff..b0dc0b6cb4e0 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) */ void radeon_wb_disable(struct radeon_device *rdev) { - int r; - - if (rdev->wb.wb_obj) { - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) - return; - radeon_bo_kunmap(rdev->wb.wb_obj); - radeon_bo_unpin(rdev->wb.wb_obj); - radeon_bo_unreserve(rdev->wb.wb_obj); - } rdev->wb.enabled = false; } @@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev) { radeon_wb_disable(rdev); if (rdev->wb.wb_obj) { + if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) { + radeon_bo_kunmap(rdev->wb.wb_obj); + radeon_bo_unpin(rdev->wb.wb_obj); + radeon_bo_unreserve(rdev->wb.wb_obj); + } radeon_bo_unref(&rdev->wb.wb_obj); rdev->wb.wb = NULL; 
rdev->wb.wb_obj = NULL; @@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev) dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); return r; } - } - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) { - radeon_wb_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); - if (r) { + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) { + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->wb.wb_obj); + dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); radeon_bo_unreserve(rdev->wb.wb_obj); - dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); - radeon_wb_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); - radeon_bo_unreserve(rdev->wb.wb_obj); - if (r) { - dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); - radeon_wb_fini(rdev); - return r; + if (r) { + dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } } /* clear wb memory */ diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 5b937dfe6f65..ddb8f8e04eb5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) { struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; if (likely(rdev->wb.enabled || !drv->scratch_reg)) { - *drv->cpu_addr = cpu_to_le32(seq); + if (drv->cpu_addr) { + *drv->cpu_addr = cpu_to_le32(seq); + } } else { WREG32(drv->scratch_reg, seq); } @@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) u32 seq = 0; if (likely(rdev->wb.enabled || !drv->scratch_reg)) { - seq = le32_to_cpu(*drv->cpu_addr); + if (drv->cpu_addr) { + seq = le32_to_cpu(*drv->cpu_addr); + } else { + seq = lower_32_bits(atomic64_read(&drv->last_seq)); + } } else { seq = RREG32(drv->scratch_reg); } diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 2c1341f63dc5..43ec4a401f07 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_bo_va *bo_va) { - int r; + int r = 0; mutex_lock(&rdev->vm_manager.lock); mutex_lock(&bo_va->vm->mutex); - r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); + if (bo_va->soffset) { + r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); + } mutex_unlock(&rdev->vm_manager.lock); list_del(&bo_va->vm_list); mutex_unlock(&bo_va->vm->mutex); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index e17faa7cf732..82434018cbe8 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ + radeon_ring_free_size(rdev, ring); + if (ring->ring_free_dw == (ring->ring_size / 4)) { + /* This is an empty ring update lockup info to avoid + * false positive. 
+ */ + radeon_ring_lockup_update(ring); + } ndw = (ndw + ring->align_mask) & ~ring->align_mask; while (ndw > (ring->ring_free_dw - 1)) { radeon_ring_free_size(rdev, ring); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 906e5c0ca3b9..cad735dd02c6 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev) if (!r) { radeon_bo_kunmap(rdev->uvd.vcpu_bo); radeon_bo_unpin(rdev->uvd.vcpu_bo); + rdev->uvd.cpu_addr = NULL; + if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) { + radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); + } radeon_bo_unreserve(rdev->uvd.vcpu_bo); + + if (rdev->uvd.cpu_addr) { + radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); + } else { + rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL; + } } return r; } @@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev) return r; } + /* Have been pin in cpu unmap unpin */ + radeon_bo_kunmap(rdev->uvd.vcpu_bo); + radeon_bo_unpin(rdev->uvd.vcpu_bo); + r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, &rdev->uvd.gpu_addr); if (r) { @@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, } /* stitch together an UVD create msg */ - msg[0] = 0x00000de4; - msg[1] = 0x00000000; - msg[2] = handle; - msg[3] = 0x00000000; - msg[4] = 0x00000000; - msg[5] = 0x00000000; - msg[6] = 0x00000000; - msg[7] = 0x00000780; - msg[8] = 0x00000440; - msg[9] = 0x00000000; - msg[10] = 0x01b37000; + msg[0] = cpu_to_le32(0x00000de4); + msg[1] = cpu_to_le32(0x00000000); + msg[2] = cpu_to_le32(handle); + msg[3] = cpu_to_le32(0x00000000); + msg[4] = cpu_to_le32(0x00000000); + msg[5] = cpu_to_le32(0x00000000); + msg[6] = cpu_to_le32(0x00000000); + msg[7] = cpu_to_le32(0x00000780); + msg[8] = cpu_to_le32(0x00000440); + msg[9] = cpu_to_le32(0x00000000); + msg[10] = cpu_to_le32(0x01b37000); for (i = 11; i < 1024; ++i) - msg[i] = 0x0; + msg[i] = cpu_to_le32(0x0); radeon_bo_kunmap(bo); radeon_bo_unreserve(bo); @@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, } /* stitch together an UVD destroy msg */ - msg[0] = 0x00000de4; - msg[1] = 0x00000002; - msg[2] = handle; - msg[3] = 0x00000000; + msg[0] = cpu_to_le32(0x00000de4); + msg[1] = cpu_to_le32(0x00000002); + msg[2] = cpu_to_le32(handle); + msg[3] = cpu_to_le32(0x00000000); for (i = 4; i < 1024; ++i) - msg[i] = 0x0; + msg[i] = cpu_to_le32(0x0); radeon_bo_kunmap(bo); radeon_bo_unreserve(bo); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d6cbfe9df218..fa061d46527f 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -137,7 +137,7 @@ static const struct xpad_device { { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, - { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 }, + { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, diff --git a/drivers/input/keyboard/Kconfig 
b/drivers/input/keyboard/Kconfig index 62a2c0e4cc99..7ac9c9818d55 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -431,6 +431,7 @@ config KEYBOARD_TEGRA config KEYBOARD_OPENCORES tristate "OpenCores Keyboard Controller" + depends on HAS_IOMEM help Say Y here if you want to use the OpenCores Keyboard Controller http://www.opencores.org/project,keyboardcontroller diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index aebfe3ecb945..1bda828f4b55 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2 config SERIO_ALTERA_PS2 tristate "Altera UP PS/2 controller" + depends on HAS_IOMEM help Say Y here if you have Altera University Program PS/2 ports. diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 518282da6d85..384fbcd0cee0 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ case 0x160802: /* Cintiq 13HD Pro Pen */ case 0x180802: /* DTH2242 Pen */ + case 0x100802: /* Intuos4/5 13HD/24HD General Pen */ wacom->tool[idx] = BTN_TOOL_PEN; break; @@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ case 0x18080a: /* DTH2242 Eraser */ + case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */ wacom->tool[idx] = BTN_TOOL_RUBBER; break; diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c index 8e60437ac85b..ae89d2609ab0 100644 --- a/drivers/input/touchscreen/cyttsp_core.c +++ b/drivers/input/touchscreen/cyttsp_core.c @@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd) return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd); } +static int cyttsp_handshake(struct cyttsp *ts) +{ + if (ts->pdata->use_hndshk) + return ttsp_send_command(ts, + ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); + + return 0; +} + static int cyttsp_load_bl_regs(struct cyttsp *ts) { memset(&ts->bl_data, 0, sizeof(ts->bl_data)); @@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts) memcpy(bl_cmd, bl_command, sizeof(bl_command)); if (ts->pdata->bl_keys) memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS], - ts->pdata->bl_keys, sizeof(bl_command)); + ts->pdata->bl_keys, CY_NUM_BL_KEYS); error = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(bl_cmd), bl_cmd); @@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts) if (error) return error; + error = cyttsp_handshake(ts); + if (error) + return error; + return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? 
-EIO : 0; } @@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts) if (error) return error; + error = cyttsp_handshake(ts); + if (error) + return error; + if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl) return -EIO; @@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle) goto out; /* provide flow control handshake */ - if (ts->pdata->use_hndshk) { - error = ttsp_send_command(ts, - ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); - if (error) - goto out; - } + error = cyttsp_handshake(ts); + if (error) + goto out; if (unlikely(ts->state == CY_IDLE_STATE)) goto out; diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h index 1aa3c6967e70..f1ebde369f86 100644 --- a/drivers/input/touchscreen/cyttsp_core.h +++ b/drivers/input/touchscreen/cyttsp_core.h @@ -67,8 +67,8 @@ struct cyttsp_xydata { /* TTSP System Information interface definition */ struct cyttsp_sysinfo_data { u8 hst_mode; - u8 mfg_cmd; u8 mfg_stat; + u8 mfg_cmd; u8 cid[3]; u8 tt_undef1; u8 uid[8]; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1760ceb68b7b..19ceaa60e0f4 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d, static int __cpuinit gic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { - if (action == CPU_STARTING) + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) gic_cpu_init(&gic_data[0]); return NOTIFY_OK; } diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 7f5a7cac6dc7..8270388e2a0d 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -136,9 +136,9 @@ config DVB_NET # This Kconfig option is used by both PCI and USB drivers config TTPCI_EEPROM - tristate - depends on I2C - default n + tristate + depends on I2C + default n source "drivers/media/dvb-core/Kconfig" @@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT If unsure say Y. 
+config MEDIA_ATTACH + bool + depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT + depends on MODULES + default MODULES + source "drivers/media/i2c/Kconfig" source "drivers/media/tuners/Kconfig" source "drivers/media/dvb-frontends/Kconfig" diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index cb52438e53ac..9eac5310942f 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd, if (fie->pad != OIF_SOURCE_PAD) return -EINVAL; - if (fie->index > ARRAY_SIZE(s5c73m3_intervals)) + if (fie->index >= ARRAY_SIZE(s5c73m3_intervals)) return -EINVAL; mutex_lock(&state->lock); diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 27d62623274b..aba5b1c649e6 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol, int changed = 0; u32 old; - if (core->board.audio_chip == V4L2_IDENT_WM8775) + if (core->sd_wm8775) snd_cx88_wm8775_volume_put(kcontrol, value); left = value->value.integer.value[0] & 0x3f; @@ -682,8 +682,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol, vol ^= bit; cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol); /* Pass mute onto any WM8775 */ - if ((core->board.audio_chip == V4L2_IDENT_WM8775) && - ((1<<6) == bit)) + if (core->sd_wm8775 && ((1<<6) == bit)) wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit)); ret = 1; } @@ -903,7 +902,7 @@ static int cx88_audio_initdev(struct pci_dev *pci, goto error; /* If there's a wm8775 then add a Line-In ALC switch */ - if (core->board.audio_chip == V4L2_IDENT_WM8775) + if (core->sd_wm8775) snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip)); strcpy (card->driver, "CX88x"); diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 1b00615fd395..c7a9be1065c0 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -385,8 +385,7 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input) /* The wm8775 module has the "2" route hardwired into the initialization. Some boards may use different routes for different inputs. 
HVR-1300 surely does */ - if (core->board.audio_chip && - core->board.audio_chip == V4L2_IDENT_WM8775) { + if (core->sd_wm8775) { call_all(core, audio, s_routing, INPUT(input).audioroute, 0, 0); } @@ -771,8 +770,7 @@ static int video_open(struct file *file) cx_write(MO_GP1_IO, core->board.radio.gpio1); cx_write(MO_GP2_IO, core->board.radio.gpio2); if (core->board.radio.audioroute) { - if(core->board.audio_chip && - core->board.audio_chip == V4L2_IDENT_WM8775) { + if (core->sd_wm8775) { call_all(core, audio, s_routing, core->board.radio.audioroute, 0, 0); } @@ -959,7 +957,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl) u32 value,mask; /* Pass changes onto any WM8775 */ - if (core->board.audio_chip == V4L2_IDENT_WM8775) { + if (core->sd_wm8775) { switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: wm8775_s_ctrl(core, ctrl->id, ctrl->val); diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index 48b8d7af386d..9d1481a60bd9 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -576,6 +576,14 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } +static int vidioc_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) +{ + struct coda_ctx *ctx = fh_to_ctx(priv); + + return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create); +} + static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { @@ -610,6 +618,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = { .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, + .vidioc_create_bufs = vidioc_create_bufs, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index 1802f11e939f..d0b375cf565f 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -916,6 +916,21 @@ static int vpbe_display_s_fmt(struct file *file, void *priv, other video window */ layer->pix_fmt = *pixfmt; + if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) { + struct vpbe_layer *otherlayer; + + otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer); + /* if other layer is available, only + * claim it, do not configure it + */ + ret = osd_device->ops.request_layer(osd_device, + otherlayer->layer_info.id); + if (ret < 0) { + v4l2_err(&vpbe_dev->v4l2_dev, + "Display Manager failed to allocate layer\n"); + return -EBUSY; + } + } /* Get osd layer config */ osd_device->ops.get_layer_config(osd_device, diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 8c50d3074866..93609091cb23 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev) if (NULL == ccdc_cfg) { v4l2_err(pdev->dev.driver, "Memory allocation failed for ccdc_cfg\n"); - goto probe_free_lock; + goto probe_free_dev_mem; } mutex_lock(&ccdc_lock); @@ -1991,7 +1991,6 @@ probe_out_release_irq: free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); probe_free_ccdc_cfg_mem: kfree(ccdc_cfg); -probe_free_lock: mutex_unlock(&ccdc_lock); probe_free_dev_mem: kfree(vpfe_dev); diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c index b0ff67bc1b05..d05eaa2c8490 100644 --- a/drivers/media/platform/exynos4-is/fimc-is-regs.c +++ 
b/drivers/media/platform/exynos4-is/fimc-is-regs.c @@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is) HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO, }; - if (WARN_ON(is->config_index > ARRAY_SIZE(cmd))) + if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd))) return -EINVAL; mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0)); diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 47c6363d04e2..0741945b79ed 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -48,7 +48,6 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = { [ISS_CLK_LITE0] = "lite0", [ISS_CLK_LITE1] = "lite1", [ISS_CLK_MPLL] = "mpll", - [ISS_CLK_SYSREG] = "sysreg", [ISS_CLK_ISP] = "isp", [ISS_CLK_DRC] = "drc", [ISS_CLK_FD] = "fd", @@ -71,7 +70,6 @@ static void fimc_is_put_clocks(struct fimc_is *is) for (i = 0; i < ISS_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; - clk_unprepare(is->clocks[i]); clk_put(is->clocks[i]); is->clocks[i] = ERR_PTR(-EINVAL); } @@ -90,12 +88,6 @@ static int fimc_is_get_clocks(struct fimc_is *is) ret = PTR_ERR(is->clocks[i]); goto err; } - ret = clk_prepare(is->clocks[i]); - if (ret < 0) { - clk_put(is->clocks[i]); - is->clocks[i] = ERR_PTR(-EINVAL); - goto err; - } } return 0; @@ -103,7 +95,7 @@ err: fimc_is_put_clocks(is); dev_err(&is->pdev->dev, "failed to get clock: %s\n", fimc_is_clocks[i]); - return -ENXIO; + return ret; } static int fimc_is_setup_clocks(struct fimc_is *is) @@ -144,7 +136,7 @@ int fimc_is_enable_clocks(struct fimc_is *is) for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; - ret = clk_enable(is->clocks[i]); + ret = clk_prepare_enable(is->clocks[i]); if (ret < 0) { dev_err(&is->pdev->dev, "clock %s enable failed\n", fimc_is_clocks[i]); @@ -163,7 +155,7 @@ void fimc_is_disable_clocks(struct fimc_is *is) for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (!IS_ERR(is->clocks[i])) { - clk_disable(is->clocks[i]); + clk_disable_unprepare(is->clocks[i]); pr_debug("disabled clock: %s\n", fimc_is_clocks[i]); } } @@ -326,6 +318,11 @@ int fimc_is_start_firmware(struct fimc_is *is) struct device *dev = &is->pdev->dev; int ret; + if (is->fw.f_w == NULL) { + dev_err(dev, "firmware is not loaded\n"); + return -EINVAL; + } + memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); wmb(); @@ -837,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev) goto err_clk; } pm_runtime_enable(dev); - /* - * Enable only the ISP power domain, keep FIMC-IS clocks off until - * the whole clock tree is configured. The ISP power domain needs - * be active in order to acces any CMU_ISP clock registers. 
- */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto err_irq; - - ret = fimc_is_setup_clocks(is); - pm_runtime_put_sync(dev); + ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_irq; - is->clk_init = true; - is->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(is->alloc_ctx)) { ret = PTR_ERR(is->alloc_ctx); @@ -875,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev) if (ret < 0) goto err_dfs; + pm_runtime_put_sync(dev); + dev_dbg(dev, "FIMC-IS registered successfully\n"); return 0; @@ -894,9 +881,11 @@ err_clk: static int fimc_is_runtime_resume(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); + int ret; - if (!is->clk_init) - return 0; + ret = fimc_is_setup_clocks(is); + if (ret) + return ret; return fimc_is_enable_clocks(is); } @@ -905,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); - if (is->clk_init) - fimc_is_disable_clocks(is); - + fimc_is_disable_clocks(is); return 0; } @@ -941,7 +928,8 @@ static int fimc_is_remove(struct platform_device *pdev) vb2_dma_contig_cleanup_ctx(is->alloc_ctx); fimc_is_put_clocks(is); fimc_is_debugfs_remove(is); - release_firmware(is->fw.f_w); + if (is->fw.f_w) + release_firmware(is->fw.f_w); fimc_is_free_cpu_memory(is); return 0; diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h index f5275a5b0156..d7db133b493f 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.h +++ b/drivers/media/platform/exynos4-is/fimc-is.h @@ -73,7 +73,6 @@ enum { ISS_CLK_LITE0, ISS_CLK_LITE1, ISS_CLK_MPLL, - ISS_CLK_SYSREG, ISS_CLK_ISP, ISS_CLK_DRC, ISS_CLK_FD, @@ -265,7 +264,6 @@ struct fimc_is { spinlock_t slock; struct clk *clocks[ISS_CLKS_MAX]; - bool clk_init; void __iomem *regs; void __iomem *pmu_regs; int irq; diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c index d63947f7b302..7ede30b5910f 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.c +++ b/drivers/media/platform/exynos4-is/fimc-isp.c @@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd, return 0; } - mf->colorspace = V4L2_COLORSPACE_JPEG; + mf->colorspace = V4L2_COLORSPACE_SRGB; mutex_lock(&isp->subdev_lock); __is_get_frame_size(is, &cur_fmt); @@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd, v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n", __func__, fmt->pad, mf->code, mf->width, mf->height); - mf->colorspace = V4L2_COLORSPACE_JPEG; + mf->colorspace = V4L2_COLORSPACE_SRGB; mutex_lock(&isp->subdev_lock); __isp_subdev_try_format(isp, fmt); diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index a2eda9d5ac87..254d70fe762a 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, node = v4l2_of_get_next_endpoint(node, NULL); if (!node) { dev_err(&pdev->dev, "No port node at %s\n", - node->full_name); + pdev->dev.of_node->full_name); return -EINVAL; } /* Get port node and validate MIPI-CSI channel id. 
*/ diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h index 261134baa655..35d2fcdc0036 100644 --- a/drivers/media/platform/s3c-camif/camif-core.h +++ b/drivers/media/platform/s3c-camif/camif-core.h @@ -229,7 +229,7 @@ struct camif_vp { unsigned int state; u16 fmt_flags; u8 id; - u8 rotation; + u16 rotation; u8 hflip; u8 vflip; unsigned int offset; diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile index ddc2900d88a2..d18cb5edd2d5 100644 --- a/drivers/media/platform/s5p-jpeg/Makefile +++ b/drivers/media/platform/s5p-jpeg/Makefile @@ -1,2 +1,2 @@ s5p-jpeg-objs := jpeg-core.o -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile index 379008c6d09a..15f59b324fef 100644 --- a/drivers/media/platform/s5p-mfc/Makefile +++ b/drivers/media/platform/s5p-mfc/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 01f9ae0dadb0..d12faa691af8 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, leave_handle_frame: spin_unlock_irqrestore(&dev->irqlock, flags); if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) - || ctx->dst_queue_cnt < ctx->dpb_count) + || ctx->dst_queue_cnt < ctx->pb_count) clear_work_bit(ctx); s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); wake_up_ctx(ctx, reason, err); @@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx); - ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, + ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, dev); ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count, dev); @@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx, struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *mb_entry; - mfc_debug(2, "Stream completed"); + mfc_debug(2, "Stream completed\n"); s5p_mfc_clear_int_flags(dev); ctx->int_type = reason; @@ -1362,7 +1362,6 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = { .port_num = MFC_NUM_PORTS, .buf_size = &buf_size_v5, .buf_align = &mfc_buf_align_v5, - .mclk_name = "sclk_mfc", .fw_name = "s5p-mfc.fw", }; @@ -1389,7 +1388,6 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = { .port_num = MFC_NUM_PORTS_V6, .buf_size = &buf_size_v6, .buf_align = &mfc_buf_align_v6, - .mclk_name = "aclk_333", .fw_name = "s5p-mfc-v6.fw", }; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index 202d1d7a37a8..ef4074cd5316 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -138,6 +138,7 @@ enum s5p_mfc_inst_state { MFCINST_INIT = 100, MFCINST_GOT_INST, MFCINST_HEAD_PARSED, + MFCINST_HEAD_PRODUCED, MFCINST_BUFS_SET, MFCINST_RUNNING, MFCINST_FINISHING, @@ -231,7 +232,6 @@ struct s5p_mfc_variant { unsigned int port_num; struct s5p_mfc_buf_size *buf_size; struct s5p_mfc_buf_align *buf_align; - char *mclk_name; char *fw_name; }; @@ -438,7 
+438,7 @@ struct s5p_mfc_enc_params { u32 rc_framerate_num; u32 rc_framerate_denom; - union { + struct { struct s5p_mfc_h264_enc_params h264; struct s5p_mfc_mpeg4_enc_params mpeg4; } codec; @@ -602,7 +602,7 @@ struct s5p_mfc_ctx { int after_packed_pb; int sei_fp_parse; - int dpb_count; + int pb_count; int total_dpb_count; int mv_count; /* Buffers */ diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index 2e5f30b40dea..dc1fc94a488d 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev) dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size, &dev->bank1, GFP_KERNEL); - if (IS_ERR(dev->fw_virt_addr)) { + if (IS_ERR_OR_NULL(dev->fw_virt_addr)) { dev->fw_virt_addr = NULL; mfc_err("Allocating bitprocessor buffer failed\n"); return -ENOMEM; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h index bd5cd4ae993c..8e608f5aa0d7 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h @@ -30,8 +30,8 @@ extern int debug; #define mfc_debug(level, fmt, args...) #endif -#define mfc_debug_enter() mfc_debug(5, "enter") -#define mfc_debug_leave() mfc_debug(5, "leave") +#define mfc_debug_enter() mfc_debug(5, "enter\n") +#define mfc_debug_leave() mfc_debug(5, "leave\n") #define mfc_err(fmt, args...) \ do { \ diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 4af53bd2f182..00b07032f4f0 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) /* Context is to decode a frame */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_RUNNING && - ctx->dst_queue_cnt >= ctx->dpb_count) + ctx->dst_queue_cnt >= ctx->pb_count) return 1; /* Context is to return last frame */ if (ctx->state == MFCINST_FINISHING && - ctx->dst_queue_cnt >= ctx->dpb_count) + ctx->dst_queue_cnt >= ctx->pb_count) return 1; /* Context is to set buffers */ if (ctx->src_queue_cnt >= 1 && @@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) /* Resolution change */ if ((ctx->state == MFCINST_RES_CHANGE_INIT || ctx->state == MFCINST_RES_CHANGE_FLUSH) && - ctx->dst_queue_cnt >= ctx->dpb_count) + ctx->dst_queue_cnt >= ctx->pb_count) return 1; if (ctx->state == MFCINST_RES_CHANGE_END && ctx->src_queue_cnt >= 1) @@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv, mfc_err("vb2_reqbufs on capture failed\n"); return ret; } - if (reqbufs->count < ctx->dpb_count) { + if (reqbufs->count < ctx->pb_count) { mfc_err("Not enough buffers allocated\n"); reqbufs->count = 0; s5p_mfc_clock_on(); @@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { - ctrl->val = ctx->dpb_count; + ctrl->val = ctx->pb_count; break; } else if (ctx->state != MFCINST_INIT) { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); @@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0); if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { - ctrl->val = ctx->dpb_count; + ctrl->val = ctx->pb_count; } else { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); 
return -EINVAL; @@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq, /* Output plane count is 2 - one for Y and one for CbCr */ *plane_count = 2; /* Setup buffer count */ - if (*buf_count < ctx->dpb_count) - *buf_count = ctx->dpb_count; - if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB) - *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB; + if (*buf_count < ctx->pb_count) + *buf_count = ctx->pb_count; + if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB) + *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB; if (*buf_count > MFC_MAX_BUFFERS) *buf_count = MFC_MAX_BUFFERS; } else { diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 4f6b553c4b2d..2549967b2f85 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -592,7 +592,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) return 1; /* context is ready to encode a frame */ if ((ctx->state == MFCINST_RUNNING || - ctx->state == MFCINST_HEAD_PARSED) && + ctx->state == MFCINST_HEAD_PRODUCED) && ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1) return 1; /* context is ready to encode remaining frames */ @@ -649,6 +649,7 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) struct s5p_mfc_enc_params *p = &ctx->enc_params; struct s5p_mfc_buf *dst_mb; unsigned long flags; + unsigned int enc_pb_count; if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) { spin_lock_irqsave(&dev->irqlock, flags); @@ -661,18 +662,19 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE); spin_unlock_irqrestore(&dev->irqlock, flags); } - if (IS_MFCV6(dev)) { - ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */ - } else { + + if (!IS_MFCV6(dev)) { ctx->state = MFCINST_RUNNING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); - } - - if (IS_MFCV6(dev)) - ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, + } else { + enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_enc_dpb_count, dev); + if (ctx->pb_count < enc_pb_count) + ctx->pb_count = enc_pb_count; + ctx->state = MFCINST_HEAD_PRODUCED; + } return 0; } @@ -717,9 +719,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); - mfc_debug(2, "Encoded slice type: %d", slice_type); - mfc_debug(2, "Encoded stream size: %d", strm_size); - mfc_debug(2, "Display order: %d", + mfc_debug(2, "Encoded slice type: %d\n", slice_type); + mfc_debug(2, "Encoded stream size: %d\n", strm_size); + mfc_debug(2, "Display order: %d\n", mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); spin_lock_irqsave(&dev->irqlock, flags); if (slice_type >= 0) { @@ -1055,15 +1057,13 @@ static int vidioc_reqbufs(struct file *file, void *priv, } ctx->capture_state = QUEUE_BUFS_REQUESTED; - if (!IS_MFCV6(dev)) { - ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, - alloc_codec_buffers, ctx); - if (ret) { - mfc_err("Failed to allocate encoding buffers\n"); - reqbufs->count = 0; - ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); - return -ENOMEM; - } + ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, + alloc_codec_buffers, ctx); + if (ret) { + mfc_err("Failed to allocate encoding buffers\n"); + reqbufs->count = 0; + ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); + return -ENOMEM; } } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { if (ctx->output_state != QUEUE_FREE) { @@ -1071,6 
+1071,19 @@ static int vidioc_reqbufs(struct file *file, void *priv, ctx->output_state); return -EINVAL; } + + if (IS_MFCV6(dev)) { + /* Check for min encoder buffers */ + if (ctx->pb_count && + (reqbufs->count < ctx->pb_count)) { + reqbufs->count = ctx->pb_count; + mfc_debug(2, "Minimum %d output buffers needed\n", + ctx->pb_count); + } else { + ctx->pb_count = reqbufs->count; + } + } + ret = vb2_reqbufs(&ctx->vq_src, reqbufs); if (ret != 0) { mfc_err("error in vb2_reqbufs() for E(S)\n"); @@ -1533,14 +1546,14 @@ int vidioc_encoder_cmd(struct file *file, void *priv, spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { - mfc_debug(2, "EOS: empty src queue, entering finishing state"); + mfc_debug(2, "EOS: empty src queue, entering finishing state\n"); ctx->state = MFCINST_FINISHING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); spin_unlock_irqrestore(&dev->irqlock, flags); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { - mfc_debug(2, "EOS: marking last buffer of stream"); + mfc_debug(2, "EOS: marking last buffer of stream\n"); buf = list_entry(ctx->src_queue.prev, struct s5p_mfc_buf, list); if (buf->flags & MFC_BUF_FLAG_USED) @@ -1609,9 +1622,9 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb) mfc_err("failed to get plane cookie\n"); return -EINVAL; } - mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx", - vb->v4l2_buf.index, i, - vb2_dma_contig_plane_dma_addr(vb, i)); + mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n", + vb->v4l2_buf.index, i, + vb2_dma_contig_plane_dma_addr(vb, i)); } return 0; } @@ -1760,11 +1773,27 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; - v4l2_ctrl_handler_setup(&ctx->ctrl_handler); + if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) { + + if ((ctx->state == MFCINST_GOT_INST) && + (dev->curr_ctx == ctx->num) && dev->hw_lock) { + s5p_mfc_wait_for_done_ctx(ctx, + S5P_MFC_R2H_CMD_SEQ_DONE_RET, + 0); + } + + if (ctx->src_bufs_cnt < ctx->pb_count) { + mfc_err("Need minimum %d OUTPUT buffers\n", + ctx->pb_count); + return -EINVAL; + } + } + /* If context is ready then dev = work->data;schedule it to run */ if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); + return 0; } @@ -1920,6 +1949,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx) if (controls[i].is_volatile && ctx->ctrls[i]) ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; } + v4l2_ctrl_handler_setup(&ctx->ctrl_handler); return 0; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 0af05a2d1cd4..368582b091bf 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1275,8 +1275,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); - mfc_debug(2, "encoding buffer with index=%d state=%d", - src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state); + mfc_debug(2, "encoding buffer with index=%d state=%d\n", + src_mb ? 
src_mb->b->v4l2_buf.index : -1, ctx->state); s5p_mfc_encode_one_frame_v5(ctx); return 0; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index 7e76fce2e524..66f0d042357f 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -62,12 +62,6 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx) /* NOP */ } -static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev) -{ - /* NOP */ - return -1; -} - /* Allocate codec buffers */ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) { @@ -167,7 +161,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + - (ctx->dpb_count * (ctx->luma_dpb_size + + (ctx->pb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; @@ -181,7 +175,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + - (ctx->dpb_count * (ctx->luma_dpb_size + + (ctx->pb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; @@ -198,7 +192,6 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) } BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); } - return 0; } @@ -449,8 +442,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx, WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */ WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6); - mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d", - addr, size); + mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n", + addr, size); return 0; } @@ -463,8 +456,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */ WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6); - mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr); - mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr); + mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr); + mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr); } static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, @@ -479,8 +472,8 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6); enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6); - mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr); - mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr); + mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr); + mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr); } /* Set encoding ref & codec buffer */ @@ -497,7 +490,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1); - for (i = 0; i < ctx->dpb_count; i++) { + for (i = 0; i < ctx->pb_count; i++) { WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i)); buf_addr1 += ctx->luma_dpb_size; WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i)); @@ -520,7 +513,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) buf_size1 -= ctx->tmv_buffer_size; mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n", - buf_addr1, buf_size1, ctx->dpb_count); + buf_addr1, buf_size1, ctx->pb_count); if (buf_size1 < 0) { mfc_debug(2, "Not enough memory has been 
allocated.\n"); return -ENOMEM; @@ -1431,8 +1424,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); - mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr); - mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr); + mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr); + mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr); s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr); @@ -1522,22 +1515,6 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx) struct s5p_mfc_dev *dev = ctx->dev; int ret; - ret = s5p_mfc_alloc_codec_buffers_v6(ctx); - if (ret) { - mfc_err("Failed to allocate encoding buffers.\n"); - return -ENOMEM; - } - - /* Header was generated now starting processing - * First set the reference frame buffers - */ - if (ctx->capture_state != QUEUE_BUFS_REQUESTED) { - mfc_err("It seems that destionation buffers were not\n" - "requested.MFC requires that header should be generated\n" - "before allocating codec buffer.\n"); - return -EAGAIN; - } - dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_set_enc_ref_buffer_v6(ctx); @@ -1582,7 +1559,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) mfc_debug(1, "Seting new context to %p\n", ctx); /* Got context to run in ctx */ mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n", - ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt); + ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt); mfc_debug(1, "ctx->state=%d\n", ctx->state); /* Last frame has already been sent to MFC * Now obtaining frames from MFC buffer */ @@ -1647,7 +1624,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) case MFCINST_GOT_INST: s5p_mfc_run_init_enc(ctx); break; - case MFCINST_HEAD_PARSED: /* Only for MFC6.x */ + case MFCINST_HEAD_PRODUCED: ret = s5p_mfc_run_init_enc_buffers(ctx); break; default: @@ -1730,7 +1707,7 @@ static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev) return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6); } -static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev) +static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6); } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c index 6aa38a56aaf2..11d5f1dada32 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c @@ -50,19 +50,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev) goto err_p_ip_clk; } - pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name); - if (IS_ERR(pm->clock)) { - mfc_err("Failed to get MFC clock\n"); - ret = PTR_ERR(pm->clock); - goto err_g_ip_clk_2; - } - - ret = clk_prepare(pm->clock); - if (ret) { - mfc_err("Failed to prepare MFC clock\n"); - goto err_p_ip_clk_2; - } - atomic_set(&pm->power, 0); #ifdef CONFIG_PM_RUNTIME pm->device = &dev->plat_dev->dev; @@ -72,10 +59,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev) atomic_set(&clk_ref, 0); #endif return 0; -err_p_ip_clk_2: - clk_put(pm->clock); -err_g_ip_clk_2: - clk_unprepare(pm->clock_gate); err_p_ip_clk: clk_put(pm->clock_gate); err_g_ip_clk: @@ -86,8 +69,6 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev) { clk_unprepare(pm->clock_gate); clk_put(pm->clock_gate); - clk_unprepare(pm->clock); - clk_put(pm->clock); #ifdef CONFIG_PM_RUNTIME pm_runtime_disable(pm->device); #endif @@ -98,7 +79,7 @@ int 
s5p_mfc_clock_on(void) int ret; #ifdef CLK_DEBUG atomic_inc(&clk_ref); - mfc_debug(3, "+ %d", atomic_read(&clk_ref)); + mfc_debug(3, "+ %d\n", atomic_read(&clk_ref)); #endif ret = clk_enable(pm->clock_gate); return ret; @@ -108,7 +89,7 @@ void s5p_mfc_clock_off(void) { #ifdef CLK_DEBUG atomic_dec(&clk_ref); - mfc_debug(3, "- %d", atomic_read(&clk_ref)); + mfc_debug(3, "- %d\n", atomic_read(&clk_ref)); #endif clk_disable(pm->clock_gate); } diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index 0b32cc3f6a47..59a9deefb242 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -905,11 +905,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq, if (ftmp.fmt.pix.width != pix->width || ftmp.fmt.pix.height != pix->height) return -EINVAL; - size = pix->bytesperline ? pix->bytesperline * pix->height : - pix->width * pix->height * fmt->depth >> 3; + size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth : + pix->width * pix->height * fmt->depth / fmt->ydepth; } else { vfmt = sh_veu_get_vfmt(veu, vq->type); - size = vfmt->bytesperline * vfmt->frame.height; + size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth; } if (count < 2) @@ -1033,8 +1033,6 @@ static int sh_veu_release(struct file *file) dev_dbg(veu->dev, "Releasing instance %p\n", veu_file); - pm_runtime_put(veu->dev); - if (veu_file == veu->capture) { veu->capture = NULL; vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)); @@ -1050,6 +1048,8 @@ static int sh_veu_release(struct file *file) veu->m2m_ctx = NULL; } + pm_runtime_put(veu->dev); + kfree(veu_file); return 0; @@ -1138,10 +1138,7 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id) veu->xaction++; - if (!veu->aborting) - return IRQ_WAKE_THREAD; - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static int sh_veu_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index eea832c5fd01..3a4efbdc7668 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -643,9 +643,9 @@ static int soc_camera_close(struct file *file) if (ici->ops->init_videobuf2) vb2_queue_release(&icd->vb2_vidq); - ici->ops->remove(icd); - __soc_camera_power_off(icd); + + ici->ops->remove(icd); } if (icd->streamer == file) diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig index c0beee2fa37c..d529ba788f41 100644 --- a/drivers/media/radio/Kconfig +++ b/drivers/media/radio/Kconfig @@ -22,6 +22,7 @@ config RADIO_SI476X tristate "Silicon Laboratories Si476x I2C FM Radio" depends on I2C && VIDEO_V4L2 depends on MFD_SI476X_CORE + depends on SND_SOC select SND_SOC_SI476X ---help--- Choose Y here if you have this FM radio chip. 
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 9430c6a29937..9dc8bafe6486 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -44,7 +44,7 @@ #define FREQ_MUL (10000000 / 625) -#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status)) +#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status)) #define DRIVER_NAME "si476x-radio" #define DRIVER_CARD "SI476x AM/FM Receiver" diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig index f6768cad001a..15665debc572 100644 --- a/drivers/media/tuners/Kconfig +++ b/drivers/media/tuners/Kconfig @@ -1,23 +1,3 @@ -config MEDIA_ATTACH - bool "Load and attach frontend and tuner driver modules as needed" - depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT - depends on MODULES - default y if !EXPERT - help - Remove the static dependency of DVB card drivers on all - frontend modules for all possible card variants. Instead, - allow the card drivers to only load the frontend modules - they require. - - Also, tuner module will automatically load a tuner driver - when needed, for analog mode. - - This saves several KBytes of memory. - - Note: You will need module-init-tools v3.2 or later for this feature. - - If unsure say Y. - # Analog TV tuners, auto-loaded via tuner.ko config MEDIA_TUNER tristate diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 22015fe1a0f3..2cc8ec70e3b6 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; - struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf}; + struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; dev_dbg(&d->udev->dev, "%s:\n", __func__); @@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) goto found; } - /* check R820T by reading tuner stats at I2C addr 0x1a */ + /* check R820T ID register; reg=00 val=69 */ ret = rtl28xxu_ctrl_msg(d, &req_r820t); - if (ret == 0) { + if (ret == 0 && buf[0] == 0x69) { priv->tuner = TUNER_RTL2832_R820T; priv->tuner_name = "R820T"; goto found; diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 3fe207e038c7..d7ff3b9687c5 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -1159,6 +1159,13 @@ static int sd_start(struct gspca_dev *gspca_dev) regs[0x01] = 0x44; /* Select 24 Mhz clock */ regs[0x12] = 0x02; /* Set hstart to 2 */ } + break; + case SENSOR_PAS202: + /* For some unknown reason we need to increase hstart by 1 on + the sn9c103, otherwise we get wrong colors (bayer shift). */ + if (sd->bridge == BRIDGE_103) + regs[0x12] += 1; + break; } /* Disable compression when the raw bayer format has been selected */ if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW) diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h index 7a6a0d39c2c6..81b017a554bc 100644 --- a/drivers/media/usb/pwc/pwc.h +++ b/drivers/media/usb/pwc/pwc.h @@ -226,7 +226,7 @@ struct pwc_device struct list_head queued_bufs; spinlock_t queued_bufs_lock; /* Protects queued_bufs */ - /* Note if taking both locks v4l2_lock must always be locked first! 
*/ + /* If taking both locks vb_queue_lock must always be locked first! */ struct mutex v4l2_lock; /* Protects everything else */ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */ diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index ebb8e48619a2..fccd08b66d1a 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1835,6 +1835,8 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) { if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) return true; + if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) + return true; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: case V4L2_CID_AUDIO_VOLUME: diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index f81bda1a48ec..7658586fe5f4 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -243,7 +243,6 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_vbi_format *vbi; const struct v4l2_sliced_vbi_format *sliced; const struct v4l2_window *win; - const struct v4l2_clip *clip; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -253,7 +252,7 @@ static void v4l_print_format(const void *arg, bool write_only) pix = &p->fmt.pix; pr_cont(", width=%u, height=%u, " "pixelformat=%c%c%c%c, field=%s, " - "bytesperline=%u sizeimage=%u, colorspace=%d\n", + "bytesperline=%u, sizeimage=%u, colorspace=%d\n", pix->width, pix->height, (pix->pixelformat & 0xff), (pix->pixelformat >> 8) & 0xff, @@ -284,20 +283,14 @@ static void v4l_print_format(const void *arg, bool write_only) case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: win = &p->fmt.win; - pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, " - "chromakey=0x%08x, bitmap=%p, " - "global_alpha=0x%02x\n", - win->w.width, win->w.height, - win->w.left, win->w.top, + /* Note: we can't print the clip list here since the clips + * pointer is a userspace pointer, not a kernelspace + * pointer. 
*/ + pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n", + win->w.width, win->w.height, win->w.left, win->w.top, prt_names(win->field, v4l2_field_names), - win->chromakey, win->bitmap, win->global_alpha); - clip = win->clips; - for (i = 0; i < win->clipcount; i++) { - printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n", - i, clip->c.width, clip->c.height, - clip->c.left, clip->c.top); - clip = clip->next; - } + win->chromakey, win->clipcount, win->clips, + win->bitmap, win->global_alpha); break; case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: @@ -332,7 +325,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only) pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, " "height=%u, pixelformat=%c%c%c%c, " - "bytesperline=%u sizeimage=%u, colorspace=%d\n", + "bytesperline=%u, sizeimage=%u, colorspace=%d\n", p->capability, p->flags, p->base, p->fmt.width, p->fmt.height, (p->fmt.pixelformat & 0xff), @@ -353,7 +346,7 @@ static void v4l_print_modulator(const void *arg, bool write_only) const struct v4l2_modulator *p = arg; if (write_only) - pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans); + pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans); else pr_cont("index=%u, name=%.*s, capability=0x%x, " "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", @@ -445,13 +438,13 @@ static void v4l_print_buffer(const void *arg, bool write_only) for (i = 0; i < p->length; ++i) { plane = &p->m.planes[i]; printk(KERN_DEBUG - "plane %d: bytesused=%d, data_offset=0x%08x " + "plane %d: bytesused=%d, data_offset=0x%08x, " "offset/userptr=0x%lx, length=%d\n", i, plane->bytesused, plane->data_offset, plane->m.userptr, plane->length); } } else { - pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n", + pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n", p->bytesused, p->m.userptr, p->length); } @@ -504,6 +497,8 @@ static void v4l_print_streamparm(const void *arg, bool write_only) c->capability, c->outputmode, c->timeperframe.numerator, c->timeperframe.denominator, c->extendedmode, c->writebuffers); + } else { + pr_cont("\n"); } } @@ -734,11 +729,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only) p->type); switch (p->type) { case V4L2_FRMSIZE_TYPE_DISCRETE: - pr_cont(" wxh=%ux%u\n", + pr_cont(", wxh=%ux%u\n", p->discrete.width, p->discrete.height); break; case V4L2_FRMSIZE_TYPE_STEPWISE: - pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n", + pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n", p->stepwise.min_width, p->stepwise.min_height, p->stepwise.step_width, p->stepwise.step_height, p->stepwise.max_width, p->stepwise.max_height); @@ -764,12 +759,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only) p->width, p->height, p->type); switch (p->type) { case V4L2_FRMIVAL_TYPE_DISCRETE: - pr_cont(" fps=%d/%d\n", + pr_cont(", fps=%d/%d\n", p->discrete.numerator, p->discrete.denominator); break; case V4L2_FRMIVAL_TYPE_STEPWISE: - pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n", + pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n", p->stepwise.min.numerator, p->stepwise.min.denominator, p->stepwise.max.numerator, @@ -807,8 +802,8 @@ static void v4l_print_event(const void *arg, bool write_only) pr_cont("value64=%lld, ", c->value64); else pr_cont("value=%d, ", c->value); - pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d," - " default_value=%d\n", + pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, " + "default_value=%d\n", c->flags, c->minimum, 
c->maximum, c->step, c->default_value); break; @@ -845,7 +840,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only) const struct v4l2_frequency_band *p = arg; pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, " - "rangelow=%u, rangehigh=%u, modulation=0x%x\n", + "rangelow=%u, rangehigh=%u, modulation=0x%x\n", p->tuner, p->type, p->index, p->capability, p->rangelow, p->rangehigh, p->modulation); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 66f599fcb829..e96497f7c3ed 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -205,7 +205,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) { struct v4l2_m2m_dev *m2m_dev; - unsigned long flags_job, flags; + unsigned long flags_job, flags_out, flags_cap; m2m_dev = m2m_ctx->m2m_dev; dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); @@ -223,23 +223,26 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) return; } - spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) { - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, + flags_out); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("No input buffers available\n"); return; } - spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) { - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, + flags_cap); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, + flags_out); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("No output buffers available\n"); return; } - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); if (m2m_dev->m2m_ops->job_ready && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { @@ -372,6 +375,20 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); /** + * v4l2_m2m_create_bufs() - create a source or destination buffer, depending + * on the type + */ +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); + return vb2_create_bufs(vq, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); + +/** * v4l2_m2m_expbuf() - export a source or destination buffer, depending on * the type */ @@ -486,8 +503,10 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, if (m2m_ctx->m2m_dev->m2m_ops->unlock) m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); - poll_wait(file, &src_q->done_wq, wait); - poll_wait(file, &dst_q->done_wq, wait); + if (list_empty(&src_q->done_list)) + poll_wait(file, &src_q->done_wq, wait); + if (list_empty(&dst_q->done_list)) + poll_wait(file, &dst_q->done_wq, wait); if (m2m_ctx->m2m_dev->m2m_ops->lock) 
m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 7d833eefaf4e..e3bdc3be91e1 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -2014,7 +2014,8 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) if (list_empty(&q->queued_list)) return res | POLLERR; - poll_wait(file, &q->done_wq, wait); + if (list_empty(&q->done_list)) + poll_wait(file, &q->done_wq, wait); /* * Take first buffer available for dequeuing. diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index 721b9186a5d1..4b93ed4d5cd6 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -107,7 +107,7 @@ static struct mfd_cell tps6586x_cell[] = { .name = "tps6586x-gpio", }, { - .name = "tps6586x-pmic", + .name = "tps6586x-regulator", }, { .name = "tps6586x-rtc", diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 02d9ae7d527e..f97569613526 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2413,7 +2413,8 @@ static void bond_miimon_commit(struct bonding *bond) pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", bond->dev->name, slave->dev->name, - slave->speed, slave->duplex ? "full" : "half"); + slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, + slave->duplex ? "full" : "half"); /* notify ad that the link status has changed */ if (bond->params.mode == BOND_MODE_8023AD) diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 6e15ef08f301..cbd388eea682 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf, err = usb_8dev_cmd_version(priv, &version); if (err) { netdev_err(netdev, "can't get firmware version\n"); - goto cleanup_cmd_msg_buffer; + goto cleanup_unregister_candev; } else { netdev_info(netdev, "firmware: %d.%d, hardware: %d.%d\n", @@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf, return 0; +cleanup_unregister_candev: + unregister_netdev(priv->netdev); + cleanup_cmd_msg_buffer: kfree(priv->cmd_msg_buffer); diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index 36d6abd1cfff..ad6aa1e98348 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -67,4 +67,22 @@ config ATL1C To compile this driver as a module, choose M here. The module will be called atl1c. +config ALX + tristate "Qualcomm Atheros AR816x/AR817x support" + depends on PCI + select CRC32 + select NET_CORE + select MDIO + help + This driver supports the Qualcomm Atheros L1F ethernet adapter, + i.e. the following chipsets: + + 1969:1091 - AR8161 Gigabit Ethernet + 1969:1090 - AR8162 Fast Ethernet + 1969:10A1 - AR8171 Gigabit Ethernet + 1969:10A0 - AR8172 Fast Ethernet + + To compile this driver as a module, choose M here. The module + will be called alx. 
+ endif # NET_VENDOR_ATHEROS diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile index e7e76fb576ff..5cf1c65bbce9 100644 --- a/drivers/net/ethernet/atheros/Makefile +++ b/drivers/net/ethernet/atheros/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/ obj-$(CONFIG_ATL2) += atlx/ obj-$(CONFIG_ATL1E) += atl1e/ obj-$(CONFIG_ATL1C) += atl1c/ +obj-$(CONFIG_ALX) += alx/ diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile new file mode 100644 index 000000000000..5901fa407d52 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ALX) += alx.o +alx-objs := main.o ethtool.o hw.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h new file mode 100644 index 000000000000..50b3ae2b143d --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net> + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _ALX_H_ +#define _ALX_H_ + +#include <linux/types.h> +#include <linux/etherdevice.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include "hw.h" + +#define ALX_WATCHDOG_TIME (5 * HZ) + +struct alx_buffer { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(size); +}; + +struct alx_rx_queue { + struct alx_rrd *rrd; + dma_addr_t rrd_dma; + + struct alx_rfd *rfd; + dma_addr_t rfd_dma; + + struct alx_buffer *bufs; + + u16 write_idx, read_idx; + u16 rrd_read_idx; +}; +#define ALX_RX_ALLOC_THRESH 32 + +struct alx_tx_queue { + struct alx_txd *tpd; + dma_addr_t tpd_dma; + struct alx_buffer *bufs; + u16 write_idx, read_idx; +}; + +#define ALX_DEFAULT_TX_WORK 128 + +enum alx_device_quirks { + ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0), +}; + +struct alx_priv { + struct net_device *dev; + + struct alx_hw hw; + + /* all descriptor memory */ + struct { + dma_addr_t dma; + void *virt; + int size; + } descmem; + + /* protect int_mask updates */ + spinlock_t irq_lock; + u32 int_mask; + + int tx_ringsz; + int rx_ringsz; + int rxbuf_size; + + struct napi_struct napi; + struct alx_tx_queue txq; + struct alx_rx_queue rxq; + + struct work_struct link_check_wk; + struct work_struct reset_wk; + + u16 msg_enable; + + bool msi; +}; + +extern const struct ethtool_ops alx_ethtool_ops; +extern const char alx_drv_name[]; + +#endif diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c new file mode 100644 index 000000000000..6fa2aec2bc81 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net> + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/pci.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mdio.h> +#include <linux/interrupt.h> +#include <asm/byteorder.h> + +#include "alx.h" +#include "reg.h" +#include "hw.h" + + +static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + ecmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause; + if (alx_hw_giga(hw)) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + if (hw->adv_cfg & ADVERTISED_Autoneg) + ecmd->advertising |= hw->adv_cfg; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + if (hw->adv_cfg & ADVERTISED_Autoneg) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_INTERNAL; + + if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) { + if (hw->flowctrl & ALX_FC_RX) { + ecmd->advertising |= ADVERTISED_Pause; + + if (!(hw->flowctrl & ALX_FC_TX)) + ecmd->advertising |= ADVERTISED_Asym_Pause; + } else if (hw->flowctrl & ALX_FC_TX) { + ecmd->advertising |= ADVERTISED_Asym_Pause; + } + } + + if (hw->link_speed != SPEED_UNKNOWN) { + ethtool_cmd_speed_set(ecmd, + hw->link_speed - hw->link_speed % 10); + ecmd->duplex = hw->link_speed % 10; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + u32 adv_cfg; + + ASSERT_RTNL(); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ecmd->advertising & ADVERTISED_1000baseT_Half) + return -EINVAL; + adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; + } else { + int speed = ethtool_cmd_speed(ecmd); + + switch (speed + ecmd->duplex) { + case SPEED_10 + DUPLEX_HALF: + adv_cfg = ADVERTISED_10baseT_Half; + break; + case SPEED_10 + DUPLEX_FULL: + adv_cfg = ADVERTISED_10baseT_Full; + break; + case SPEED_100 + DUPLEX_HALF: + adv_cfg = ADVERTISED_100baseT_Half; + break; + case SPEED_100 + DUPLEX_FULL: + adv_cfg = ADVERTISED_100baseT_Full; + break; + default: + return -EINVAL; + } + } + + hw->adv_cfg = adv_cfg; + return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl); +} + +static void alx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + if (hw->flowctrl & ALX_FC_ANEG && + hw->adv_cfg & ADVERTISED_Autoneg) + pause->autoneg = AUTONEG_ENABLE; + else + pause->autoneg = AUTONEG_DISABLE; + + if (hw->flowctrl & ALX_FC_TX) + pause->tx_pause = 1; + else + pause->tx_pause = 0; + + if (hw->flowctrl & ALX_FC_RX) + pause->rx_pause = 1; + else + pause->rx_pause = 0; +} + + +static int alx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + int err = 0; + bool reconfig_phy = false; + u8 fc = 0; + + if (pause->tx_pause) + fc |= ALX_FC_TX; + if (pause->rx_pause) + fc |= ALX_FC_RX; + if (pause->autoneg) + fc |= ALX_FC_ANEG; + + ASSERT_RTNL(); + + /* restart auto-neg for auto-mode */ + if (hw->adv_cfg & ADVERTISED_Autoneg) { + if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG)) + 
reconfig_phy = true; + if (fc & hw->flowctrl & ALX_FC_ANEG && + (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + reconfig_phy = true; + } + + if (reconfig_phy) { + err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); + return err; + } + + /* flow control on mac */ + if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + alx_cfg_mac_flowcontrol(hw, fc); + + hw->flowctrl = fc; + + return 0; +} + +static u32 alx_get_msglevel(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + + return alx->msg_enable; +} + +static void alx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct alx_priv *alx = netdev_priv(netdev); + + alx->msg_enable = data; +} + +static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) + wol->wolopts |= WAKE_MAGIC; + if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) + wol->wolopts |= WAKE_PHY; +} + +static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + + hw->sleep_ctrl = 0; + + if (wol->wolopts & WAKE_MAGIC) + hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC; + if (wol->wolopts & WAKE_PHY) + hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY; + + device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl); + + return 0; +} + +static void alx_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct alx_priv *alx = netdev_priv(netdev); + + strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev), + sizeof(drvinfo->bus_info)); +} + +const struct ethtool_ops alx_ethtool_ops = { + .get_settings = alx_get_settings, + .set_settings = alx_set_settings, + .get_pauseparam = alx_get_pauseparam, + .set_pauseparam = alx_set_pauseparam, + .get_drvinfo = alx_get_drvinfo, + .get_msglevel = alx_get_msglevel, + .set_msglevel = alx_set_msglevel, + .get_wol = alx_get_wol, + .set_wol = alx_set_wol, + .get_link = ethtool_op_get_link, +}; diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c new file mode 100644 index 000000000000..220a16ad0e49 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -0,0 +1,1226 @@ +/* + * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net> + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/mdio.h> +#include "reg.h" +#include "hw.h" + +static inline bool alx_is_rev_a(u8 rev) +{ + return rev == ALX_REV_A0 || rev == ALX_REV_A1; +} + +static int alx_wait_mdio_idle(struct alx_hw *hw) +{ + u32 val; + int i; + + for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) { + val = alx_read_mem32(hw, ALX_MDIO); + if (!(val & ALX_MDIO_BUSY)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data) +{ + u32 val, clk_sel; + int err; + + *phy_data = 0; + + /* use slow clock when it's in hibernation status */ + clk_sel = hw->link_speed != SPEED_UNKNOWN ? + ALX_MDIO_CLK_SEL_25MD4 : + ALX_MDIO_CLK_SEL_25MD128; + + if (ext) { + val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | + reg << ALX_MDIO_EXTN_REG_SHIFT; + alx_write_mem32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START | + ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT; + } else { + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + reg << ALX_MDIO_REG_SHIFT | + ALX_MDIO_START | ALX_MDIO_OP_READ; + } + alx_write_mem32(hw, ALX_MDIO, val); + + err = alx_wait_mdio_idle(hw); + if (err) + return err; + val = alx_read_mem32(hw, ALX_MDIO); + *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA); + return 0; +} + +static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val, clk_sel; + + /* use slow clock when it's in hibernation status */ + clk_sel = hw->link_speed != SPEED_UNKNOWN ? 
+ ALX_MDIO_CLK_SEL_25MD4 : + ALX_MDIO_CLK_SEL_25MD128; + + if (ext) { + val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | + reg << ALX_MDIO_EXTN_REG_SHIFT; + alx_write_mem32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + phy_data << ALX_MDIO_DATA_SHIFT | + ALX_MDIO_START | ALX_MDIO_MODE_EXT; + } else { + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + reg << ALX_MDIO_REG_SHIFT | + phy_data << ALX_MDIO_DATA_SHIFT | + ALX_MDIO_START; + } + alx_write_mem32(hw, ALX_MDIO, val); + + return alx_wait_mdio_idle(hw); +} + +static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) +{ + return alx_read_phy_core(hw, false, 0, reg, phy_data); +} + +static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + return alx_write_phy_core(hw, false, 0, reg, phy_data); +} + +static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + return alx_read_phy_core(hw, true, dev, reg, pdata); +} + +static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + return alx_write_phy_core(hw, true, dev, reg, data); +} + +static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (err) + return err; + + return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata); +} + +static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (err) + return err; + + return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data); +} + +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_ext(hw, dev, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_ext(hw, dev, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_dbg(hw, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_dbg(hw, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static u16 alx_get_phy_config(struct alx_hw *hw) +{ + u32 val; + u16 phy_val; + + val = alx_read_mem32(hw, ALX_PHY_CTRL); + /* phy in reset */ + if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0) + return ALX_DRV_PHY_UNKNOWN; + + val = alx_read_mem32(hw, ALX_DRV); + val = ALX_GET_FIELD(val, ALX_DRV_PHY); + if (ALX_DRV_PHY_UNKNOWN == val) + return ALX_DRV_PHY_UNKNOWN; + + alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val); + if (ALX_PHY_INITED == phy_val) + return val; + + return ALX_DRV_PHY_UNKNOWN; +} + +static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val) +{ + u32 read; + int i; + + for (i = 0; i < ALX_SLD_MAX_TO; i++) { + read = alx_read_mem32(hw, reg); + 
if ((read & wait) == 0) { + if (val) + *val = read; + return true; + } + mdelay(1); + } + + return false; +} + +static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 mac0, mac1; + + mac0 = alx_read_mem32(hw, ALX_STAD0); + mac1 = alx_read_mem32(hw, ALX_STAD1); + + /* addr should be big-endian */ + *(__be32 *)(addr + 2) = cpu_to_be32(mac0); + *(__be16 *)addr = cpu_to_be16(mac1); + + return is_valid_ether_addr(addr); +} + +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 val; + + /* try to get it from register first */ + if (alx_read_macaddr(hw, addr)) + return 0; + + /* try to load from efuse */ + if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val)) + return -EIO; + alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START); + if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL)) + return -EIO; + if (alx_read_macaddr(hw, addr)) + return 0; + + /* try to load from flash/eeprom (if present) */ + val = alx_read_mem32(hw, ALX_EFLD); + if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) { + if (!alx_wait_reg(hw, ALX_EFLD, + ALX_EFLD_STAT | ALX_EFLD_START, &val)) + return -EIO; + alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START); + if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL)) + return -EIO; + if (alx_read_macaddr(hw, addr)) + return 0; + } + + return -EIO; +} + +void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) +{ + u32 val; + + /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ + val = be32_to_cpu(*(__be32 *)(addr + 2)); + alx_write_mem32(hw, ALX_STAD0, val); + val = be16_to_cpu(*(__be16 *)addr); + alx_write_mem32(hw, ALX_STAD1, val); +} + +static void alx_enable_osc(struct alx_hw *hw) +{ + u32 val; + + /* rising edge */ + val = alx_read_mem32(hw, ALX_MISC); + alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN); + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); +} + +static void alx_reset_osc(struct alx_hw *hw, u8 rev) +{ + u32 val, val2; + + /* clear Internal OSC settings, switching OSC by hw itself */ + val = alx_read_mem32(hw, ALX_MISC3); + alx_write_mem32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | + ALX_MISC3_25M_NOTO_INTNL); + + /* 25M clk from chipset may be unstable 1s after de-assert of + * PERST, driver need re-calibrate before enter Sleep for WoL + */ + val = alx_read_mem32(hw, ALX_MISC); + if (rev >= ALX_REV_B0) { + /* restore over current protection def-val, + * this val could be reset by MAC-RST + */ + ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF); + /* a 0->1 change will update the internal val of osc */ + val &= ~ALX_MISC_INTNLOSC_OPEN; + alx_write_mem32(hw, ALX_MISC, val); + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + /* hw will automatically dis OSC after cab. 
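+	 * (i.e. the hardware gates the internal OSC again once calibration completes; the 0->1 toggle of ALX_MSIC2_CALB_START below kicks that calibration off)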
*/ + val2 = alx_read_mem32(hw, ALX_MSIC2); + val2 &= ~ALX_MSIC2_CALB_START; + alx_write_mem32(hw, ALX_MSIC2, val2); + alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START); + } else { + val &= ~ALX_MISC_INTNLOSC_OPEN; + /* disable isolate for rev A devices */ + if (alx_is_rev_a(rev)) + val &= ~ALX_MISC_ISO_EN; + + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + alx_write_mem32(hw, ALX_MISC, val); + } + + udelay(20); +} + +static int alx_stop_mac(struct alx_hw *hw) +{ + u32 rxq, txq, val; + u16 i; + + rxq = alx_read_mem32(hw, ALX_RXQ0); + alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN); + txq = alx_read_mem32(hw, ALX_TXQ0); + alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); + + udelay(40); + + hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); + + for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_MAC_STS); + if (!(val & ALX_MAC_STS_IDLE)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +int alx_reset_mac(struct alx_hw *hw) +{ + u32 val, pmctrl; + int i, ret; + u8 rev; + bool a_cr; + + pmctrl = 0; + rev = alx_hw_revision(hw); + a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw); + + /* disable all interrupts, RXQ/TXQ */ + alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF); + alx_write_mem32(hw, ALX_IMR, 0); + alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); + + ret = alx_stop_mac(hw); + if (ret) + return ret; + + /* mac reset workaroud */ + alx_write_mem32(hw, ALX_RFD_PIDX, 1); + + /* dis l0s/l1 before mac reset */ + if (a_cr) { + pmctrl = alx_read_mem32(hw, ALX_PMCTRL); + if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) + alx_write_mem32(hw, ALX_PMCTRL, + pmctrl & ~(ALX_PMCTRL_L1_EN | + ALX_PMCTRL_L0S_EN)); + } + + /* reset whole mac safely */ + val = alx_read_mem32(hw, ALX_MASTER); + alx_write_mem32(hw, ALX_MASTER, + val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS); + + /* make sure it's real idle */ + udelay(10); + for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_RFD_PIDX); + if (val == 0) + break; + udelay(10); + } + for (; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_MASTER); + if ((val & ALX_MASTER_DMA_MAC_RST) == 0) + break; + udelay(10); + } + if (i == ALX_DMA_MAC_RST_TO) + return -EIO; + udelay(10); + + if (a_cr) { + alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS); + /* restore l0s / l1 */ + if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) + alx_write_mem32(hw, ALX_PMCTRL, pmctrl); + } + + alx_reset_osc(hw, rev); + + /* clear Internal OSC settings, switching OSC by hw itself, + * disable isolate for rev A devices + */ + val = alx_read_mem32(hw, ALX_MISC3); + alx_write_mem32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | + ALX_MISC3_25M_NOTO_INTNL); + val = alx_read_mem32(hw, ALX_MISC); + val &= ~ALX_MISC_INTNLOSC_OPEN; + if (alx_is_rev_a(rev)) + val &= ~ALX_MISC_ISO_EN; + alx_write_mem32(hw, ALX_MISC, val); + udelay(20); + + /* driver control speed/duplex, hash-alg */ + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); + + val = alx_read_mem32(hw, ALX_SERDES); + alx_write_mem32(hw, ALX_SERDES, + val | ALX_SERDES_MACCLK_SLWDWN | + ALX_SERDES_PHYCLK_SLWDWN); + + return 0; +} + +void alx_reset_phy(struct alx_hw *hw) +{ + int i; + u32 val; + u16 phy_val; + + /* (DSP)reset PHY core */ + val = alx_read_mem32(hw, ALX_PHY_CTRL); + val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ | + ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN | + ALX_PHY_CTRL_CLS); + val |= ALX_PHY_CTRL_RST_ANALOG; + + val |= (ALX_PHY_CTRL_HIB_PULSE 
| ALX_PHY_CTRL_HIB_EN); + alx_write_mem32(hw, ALX_PHY_CTRL, val); + udelay(10); + alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT); + + for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++) + udelay(10); + + /* phy power saving & hib */ + alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL, + ALX_SYSMODCTRL_IECHOADJ_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS, + ALX_VDRVBIAS_DEF); + + /* EEE advertisement */ + val = alx_read_mem32(hw, ALX_LPI_CTRL); + alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0); + + /* phy power saving */ + alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF); + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN); + /* rtl8139c, 120m issue */ + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78, + ALX_MIIEXT_NLP78_120M_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10, + ALX_MIIEXT_S3DIG10_DEF); + + if (hw->lnk_patch) { + /* Turn off half amplitude */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT); + /* Turn off Green feature */ + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val | ALX_GREENCFG2_BP_GREEN); + /* Turn off half Bias */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS); + } + + /* set phy interrupt mask */ + alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN); +} + +#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO) + +void alx_reset_pcie(struct alx_hw *hw) +{ + u8 rev = alx_hw_revision(hw); + u32 val; + u16 val16; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. 
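+	 * (re-enable I/O, memory and bus-master access and clear INTX_DISABLE in PCI_COMMAND if the BIOS left them misconfigured)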
*/ + pci_read_config_word(hw->pdev, PCI_COMMAND, &val16); + if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) { + val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(hw->pdev, PCI_COMMAND, val16); + } + + /* clear WoL setting/status */ + val = alx_read_mem32(hw, ALX_WOL0); + alx_write_mem32(hw, ALX_WOL0, 0); + + val = alx_read_mem32(hw, ALX_PDLL_TRNS1); + alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN); + + /* mask some pcie error bits */ + val = alx_read_mem32(hw, ALX_UE_SVRT); + val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR); + alx_write_mem32(hw, ALX_UE_SVRT, val); + + /* wol 25M & pclk */ + val = alx_read_mem32(hw, ALX_MASTER); + if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) == 0) + alx_write_mem32(hw, ALX_MASTER, + val | ALX_MASTER_PCLKSEL_SRDS | + ALX_MASTER_WAKEN_25M); + } else { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) != 0) + alx_write_mem32(hw, ALX_MASTER, + (val & ~ALX_MASTER_PCLKSEL_SRDS) | + ALX_MASTER_WAKEN_25M); + } + + /* ASPM setting */ + alx_enable_aspm(hw, true, true); + + udelay(10); +} + +void alx_start_mac(struct alx_hw *hw) +{ + u32 mac, txq, rxq; + + rxq = alx_read_mem32(hw, ALX_RXQ0); + alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN); + txq = alx_read_mem32(hw, ALX_TXQ0); + alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); + + mac = hw->rx_ctrl; + if (hw->link_speed % 10 == DUPLEX_FULL) + mac |= ALX_MAC_CTRL_FULLD; + else + mac &= ~ALX_MAC_CTRL_FULLD; + ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, + hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 : + ALX_MAC_CTRL_SPEED_10_100); + mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; + hw->rx_ctrl = mac; + alx_write_mem32(hw, ALX_MAC_CTRL, mac); +} + +void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc) +{ + if (fc & ALX_FC_RX) + hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN; + else + hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN; + + if (fc & ALX_FC_TX) + hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN; + else + hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN; + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en) +{ + u32 pmctrl; + u8 rev = alx_hw_revision(hw); + + pmctrl = alx_read_mem32(hw, ALX_PMCTRL); + + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER, + ALX_PMCTRL_LCKDET_TIMER_DEF); + pmctrl |= ALX_PMCTRL_RCVR_WT_1US | + ALX_PMCTRL_L1_CLKSW_EN | + ALX_PMCTRL_L1_SRDSRX_PWD; + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF); + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US); + pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN | + ALX_PMCTRL_L1_SRDSPLL_EN | + ALX_PMCTRL_L1_BUFSRX_EN | + ALX_PMCTRL_SADLY_EN | + ALX_PMCTRL_HOTRST_WTEN| + ALX_PMCTRL_L0S_EN | + ALX_PMCTRL_L1_EN | + ALX_PMCTRL_ASPM_FCEN | + ALX_PMCTRL_TXL1_AFTER_L0S | + ALX_PMCTRL_RXL1_AFTER_L0S); + if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) + pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN; + + if (l0s_en) + pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN); + if (l1_en) + pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN); + + alx_write_mem32(hw, ALX_PMCTRL, pmctrl); +} + + +static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg) +{ + u32 cfg = 0; + + if (ethadv_cfg & ADVERTISED_Autoneg) { + cfg |= ALX_DRV_PHY_AUTO; + if (ethadv_cfg & ADVERTISED_10baseT_Half) + cfg |= ALX_DRV_PHY_10; + if (ethadv_cfg & ADVERTISED_10baseT_Full) + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg 
& ADVERTISED_100baseT_Half) + cfg |= ALX_DRV_PHY_100; + if (ethadv_cfg & ADVERTISED_100baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_1000baseT_Half) + cfg |= ALX_DRV_PHY_1000; + if (ethadv_cfg & ADVERTISED_1000baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_Pause) + cfg |= ADVERTISE_PAUSE_CAP; + if (ethadv_cfg & ADVERTISED_Asym_Pause) + cfg |= ADVERTISE_PAUSE_ASYM; + } else { + switch (ethadv_cfg) { + case ADVERTISED_10baseT_Half: + cfg |= ALX_DRV_PHY_10; + break; + case ADVERTISED_100baseT_Half: + cfg |= ALX_DRV_PHY_100; + break; + case ADVERTISED_10baseT_Full: + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + break; + case ADVERTISED_100baseT_Full: + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + break; + } + } + + return cfg; +} + +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl) +{ + u16 adv, giga, cr; + u32 val; + int err = 0; + + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0); + val = alx_read_mem32(hw, ALX_DRV); + ALX_SET_FIELD(val, ALX_DRV_PHY, 0); + + if (ethadv & ADVERTISED_Autoneg) { + adv = ADVERTISE_CSMA; + adv |= ethtool_adv_to_mii_adv_t(ethadv); + + if (flowctrl & ALX_FC_ANEG) { + if (flowctrl & ALX_FC_RX) { + adv |= ADVERTISED_Pause; + if (!(flowctrl & ALX_FC_TX)) + adv |= ADVERTISED_Asym_Pause; + } else if (flowctrl & ALX_FC_TX) { + adv |= ADVERTISED_Asym_Pause; + } + } + giga = 0; + if (alx_hw_giga(hw)) + giga = ethtool_adv_to_mii_ctrl1000_t(ethadv); + + cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + + if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) || + alx_write_phy_reg(hw, MII_CTRL1000, giga) || + alx_write_phy_reg(hw, MII_BMCR, cr)) + err = -EBUSY; + } else { + cr = BMCR_RESET; + if (ethadv == ADVERTISED_100baseT_Half || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_SPEED100; + if (ethadv == ADVERTISED_10baseT_Full || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_FULLDPLX; + + err = alx_write_phy_reg(hw, MII_BMCR, cr); + } + + if (!err) { + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED); + val |= ethadv_to_hw_cfg(hw, ethadv); + } + + alx_write_mem32(hw, ALX_DRV, val); + + return err; +} + + +void alx_post_phy_link(struct alx_hw *hw) +{ + u16 phy_val, len, agc; + u8 revid = alx_hw_revision(hw); + bool adj_th = revid == ALX_REV_B0; + int speed; + + if (hw->link_speed == SPEED_UNKNOWN) + speed = SPEED_UNKNOWN; + else + speed = hw->link_speed - hw->link_speed % 10; + + if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) + return; + + /* 1000BT/AZ, wrong cable length */ + if (speed != SPEED_UNKNOWN) { + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, + &phy_val); + len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); + alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); + agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); + + if ((speed == SPEED_1000 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || + (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || + (speed == SPEED_100 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || + (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_LONG); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val | ALX_AFE_10BT_100M_TH); + } else { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_DEF); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_AFE, &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + } + + /* threshold adjust */ + if 
(adj_th && hw->lnk_patch) { + if (speed == SPEED_100) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + ALX_MSE16DB_UP); + } else if (speed == SPEED_1000) { + /* + * Giga link threshold, raise the tolerance of + * noise 50% + */ + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + &phy_val); + ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, + ALX_MSE20DB_TH_HI); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + phy_val); + } + } + } else { + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + + if (adj_th && hw->lnk_patch) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + ALX_MSE16DB_DOWN); + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val); + ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, + ALX_MSE20DB_TH_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val); + } + } +} + + +/* NOTE: + * 1. phy link must be established before calling this function + * 2. wol option (pattern,magic,link,etc.) is configed before call it. + */ +int alx_pre_suspend(struct alx_hw *hw, int speed) +{ + u32 master, mac, phy, val; + int err = 0; + + master = alx_read_mem32(hw, ALX_MASTER); + master &= ~ALX_MASTER_PCLKSEL_SRDS; + mac = hw->rx_ctrl; + /* 10/100 half */ + ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100); + mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); + + phy = alx_read_mem32(hw, ALX_PHY_CTRL); + phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS); + phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE | + ALX_PHY_CTRL_HIB_EN; + + /* without any activity */ + if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) { + err = alx_write_phy_reg(hw, ALX_MII_IER, 0); + if (err) + return err; + phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN; + } else { + if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS)) + mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN; + if (hw->sleep_ctrl & ALX_SLEEP_CIFS) + mac |= ALX_MAC_CTRL_TX_EN; + if (speed % 10 == DUPLEX_FULL) + mac |= ALX_MAC_CTRL_FULLD; + if (speed >= SPEED_1000) + ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, + ALX_MAC_CTRL_SPEED_1000); + phy |= ALX_PHY_CTRL_DSPRST_OUT; + err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_S3DIG10, + ALX_MIIEXT_S3DIG10_SL); + if (err) + return err; + } + + alx_enable_osc(hw); + hw->rx_ctrl = mac; + alx_write_mem32(hw, ALX_MASTER, master); + alx_write_mem32(hw, ALX_MAC_CTRL, mac); + alx_write_mem32(hw, ALX_PHY_CTRL, phy); + + /* set val of PDLL D3PLLOFF */ + val = alx_read_mem32(hw, ALX_PDLL_TRNS1); + val |= ALX_PDLL_TRNS1_D3PLLOFF_EN; + alx_write_mem32(hw, ALX_PDLL_TRNS1, val); + + return 0; +} + +bool alx_phy_configured(struct alx_hw *hw) +{ + u32 cfg, hw_cfg; + + cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg); + cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY); + hw_cfg = alx_get_phy_config(hw); + + if (hw_cfg == ALX_DRV_PHY_UNKNOWN) + return false; + + return cfg == hw_cfg; +} + +int alx_get_phy_link(struct alx_hw *hw, int *speed) +{ + struct pci_dev *pdev = hw->pdev; + u16 bmsr, giga; + int err; + + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + if (err) + return err; + + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + if (err) + return err; + + if (!(bmsr & BMSR_LSTATUS)) { + *speed = SPEED_UNKNOWN; + return 0; + } + + /* speed/duplex result is saved in PHY Specific Status Register */ + err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga); + if (err) + return err; + + if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED)) + goto wrong_speed; + + switch (giga & ALX_GIGA_PSSR_SPEED) { + case ALX_GIGA_PSSR_1000MBS: + *speed 
= SPEED_1000; + break; + case ALX_GIGA_PSSR_100MBS: + *speed = SPEED_100; + break; + case ALX_GIGA_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + goto wrong_speed; + } + + *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF; + return 1; + +wrong_speed: + dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); + return -EINVAL; +} + +int alx_clear_phy_intr(struct alx_hw *hw) +{ + u16 isr; + + /* clear interrupt status by reading it */ + return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); +} + +int alx_config_wol(struct alx_hw *hw) +{ + u32 wol = 0; + int err = 0; + + /* turn on magic packet event */ + if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) + wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN; + + /* turn on link up event */ + if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) { + wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK; + /* only link up can wake up */ + err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP); + } + alx_write_mem32(hw, ALX_WOL0, wol); + + return err; +} + +void alx_disable_rss(struct alx_hw *hw) +{ + u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); + + ctrl &= ~ALX_RXQ0_RSS_HASH_EN; + alx_write_mem32(hw, ALX_RXQ0, ctrl); +} + +void alx_configure_basic(struct alx_hw *hw) +{ + u32 val, raw_mtu, max_payload; + u16 val16; + u8 chip_rev = alx_hw_revision(hw); + + alx_set_macaddr(hw, hw->mac_addr); + + alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL); + + /* idle timeout to switch clk_125M */ + if (chip_rev >= ALX_REV_B0) + alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER, + ALX_IDLE_DECISN_TIMER_DEF); + + alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL); + + val = alx_read_mem32(hw, ALX_MASTER); + val |= ALX_MASTER_IRQMOD2_EN | + ALX_MASTER_IRQMOD1_EN | + ALX_MASTER_SYSALVTIMER_EN; + alx_write_mem32(hw, ALX_MASTER, val); + alx_write_mem32(hw, ALX_IRQ_MODU_TIMER, + (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT); + /* intr re-trig timeout */ + alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO); + /* tpd threshold to trig int */ + alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); + alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); + + raw_mtu = hw->mtu + ETH_HLEN; + alx_write_mem32(hw, ALX_MTU, raw_mtu + 8); + if (raw_mtu > ALX_MTU_JUMBO_TH) + hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; + + if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) + val = (raw_mtu + 8 + 7) >> 3; + else + val = ALX_TXQ1_JUMBO_TSO_TH >> 3; + alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); + + max_payload = pcie_get_readrq(hw->pdev) >> 8; + /* + * if BIOS had changed the default dma read max length, + * restore it to default value + */ + if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN) + pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN); + + val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT | + ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN | + ALX_TXQ0_SUPT_IPOPT | + ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT; + alx_write_mem32(hw, ALX_TXQ0, val); + val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT | + ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT | + ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT | + ALX_HQTPD_BURST_EN; + alx_write_mem32(hw, ALX_HQTPD, val); + + /* rxq, flow control */ + val = alx_read_mem32(hw, ALX_SRAM5); + val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3; + if (val > ALX_SRAM_RXF_LEN_8K) { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3; + } else { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_MTU_STD_ALGN) >> 3; + } + alx_write_mem32(hw, ALX_RXQ2, + val16 << 
ALX_RXQ2_RXF_XOFF_THRESH_SHIFT | + val << ALX_RXQ2_RXF_XON_THRESH_SHIFT); + val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT | + ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT | + ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT | + ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN | + ALX_RXQ0_IPV6_PARSE_EN; + + if (alx_hw_giga(hw)) + ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH, + ALX_RXQ0_ASPM_THRESH_100M); + + alx_write_mem32(hw, ALX_RXQ0, val); + + val = alx_read_mem32(hw, ALX_DMA); + val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT | + ALX_DMA_RREQ_PRI_DATA | + max_payload << ALX_DMA_RREQ_BLEN_SHIFT | + ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT | + ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT | + (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT; + alx_write_mem32(hw, ALX_DMA, val); + + /* default multi-tx-q weights */ + val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT | + 4 << ALX_WRR_PRI0_SHIFT | + 4 << ALX_WRR_PRI1_SHIFT | + 4 << ALX_WRR_PRI2_SHIFT | + 4 << ALX_WRR_PRI3_SHIFT; + alx_write_mem32(hw, ALX_WRR, val); +} + +static inline u32 alx_speed_to_ethadv(int speed) +{ + switch (speed) { + case SPEED_1000 + DUPLEX_FULL: + return ADVERTISED_1000baseT_Full; + case SPEED_100 + DUPLEX_FULL: + return ADVERTISED_100baseT_Full; + case SPEED_100 + DUPLEX_HALF: + return ADVERTISED_10baseT_Half; + case SPEED_10 + DUPLEX_FULL: + return ADVERTISED_10baseT_Full; + case SPEED_10 + DUPLEX_HALF: + return ADVERTISED_10baseT_Half; + default: + return 0; + } +} + +int alx_select_powersaving_speed(struct alx_hw *hw, int *speed) +{ + int i, err, spd; + u16 lpa; + + err = alx_get_phy_link(hw, &spd); + if (err < 0) + return err; + + if (spd == SPEED_UNKNOWN) + return 0; + + err = alx_read_phy_reg(hw, MII_LPA, &lpa); + if (err) + return err; + + if (!(lpa & LPA_LPACK)) { + *speed = spd; + return 0; + } + + if (lpa & LPA_10FULL) + *speed = SPEED_10 + DUPLEX_FULL; + else if (lpa & LPA_10HALF) + *speed = SPEED_10 + DUPLEX_HALF; + else if (lpa & LPA_100FULL) + *speed = SPEED_100 + DUPLEX_FULL; + else + *speed = SPEED_100 + DUPLEX_HALF; + + if (*speed != spd) { + err = alx_write_phy_reg(hw, ALX_MII_IER, 0); + if (err) + return err; + err = alx_setup_speed_duplex(hw, + alx_speed_to_ethadv(*speed) | + ADVERTISED_Autoneg, + ALX_FC_ANEG | ALX_FC_RX | + ALX_FC_TX); + if (err) + return err; + + /* wait for linkup */ + for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { + int speed2; + + msleep(100); + + err = alx_get_phy_link(hw, &speed2); + if (err < 0) + return err; + if (speed2 != SPEED_UNKNOWN) + break; + } + if (i == ALX_MAX_SETUP_LNK_CYCLE) + return -ETIMEDOUT; + } + + return 0; +} + +bool alx_get_phy_info(struct alx_hw *hw) +{ + u16 devs1, devs2; + + if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) || + alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1])) + return false; + + /* since we haven't PMA/PMD status2 register, we can't + * use mdio45_probe function for prtad and mmds. + * use fixed MMD3 to get mmds. 
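+	 * MDIO_DEVS1/MDIO_DEVS2 in MMD 3 hold the devices-in-package mask; their two 16-bit halves are combined into hw->mdio.mmds below.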
+ */ + if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) || + alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2)) + return false; + hw->mdio.mmds = devs1 | devs2 << 16; + + return true; +} diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h new file mode 100644 index 000000000000..65e723d2172a --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net> + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ALX_HW_H_ +#define ALX_HW_H_ +#include <linux/types.h> +#include <linux/mdio.h> +#include <linux/pci.h> +#include "reg.h" + +/* Transmit Packet Descriptor, contains 4 32-bit words. + * + * 31 16 0 + * +----------------+----------------+ + * | vlan-tag | buf length | + * +----------------+----------------+ + * | Word 1 | + * +----------------+----------------+ + * | Word 2: buf addr lo | + * +----------------+----------------+ + * | Word 3: buf addr hi | + * +----------------+----------------+ + * + * Word 2 and 3 combine to form a 64-bit buffer address + * + * Word 1 has three forms, depending on the state of bit 8/12/13: + * if bit8 =='1', the definition is just for custom checksum offload. + * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor + * for the skb is special for LSO V2, Word 2 become total skb length , + * Word 3 is meaningless. + * other condition, the definition is for general skb or ip/tcp/udp + * checksum or LSO(TSO) offload. 
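+ *
+ * As a worked example: an LSO v2 (TSO) frame sets bits 12 and 13 in its first descriptor, carries the MSS in bits 30:18 and the total frame length in Word 2, while custom checksum offload sets bit 8 and puts the checksum start in bits 7:0 and the writeback position in bits 25:18, both counted in 2-byte units.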
+ * + * Here is the depiction: + * + * 0-+ 0-+ + * 1 | 1 | + * 2 | 2 | + * 3 | Payload offset 3 | L4 header offset + * 4 | (7:0) 4 | (7:0) + * 5 | 5 | + * 6 | 6 | + * 7-+ 7-+ + * 8 Custom csum enable = 1 8 Custom csum enable = 0 + * 9 General IPv4 checksum 9 General IPv4 checksum + * 10 General TCP checksum 10 General TCP checksum + * 11 General UDP checksum 11 General UDP checksum + * 12 Large Send Segment enable 12 Large Send Segment enable + * 13 Large Send Segment type 13 Large Send Segment type + * 14 VLAN tagged 14 VLAN tagged + * 15 Insert VLAN tag 15 Insert VLAN tag + * 16 IPv4 packet 16 IPv4 packet + * 17 Ethernet frame type 17 Ethernet frame type + * 18-+ 18-+ + * 19 | 19 | + * 20 | 20 | + * 21 | Custom csum offset 21 | + * 22 | (25:18) 22 | + * 23 | 23 | MSS (30:18) + * 24 | 24 | + * 25-+ 25 | + * 26-+ 26 | + * 27 | 27 | + * 28 | Reserved 28 | + * 29 | 29 | + * 30-+ 30-+ + * 31 End of packet 31 End of packet + */ +struct alx_txd { + __le16 len; + __le16 vlan_tag; + __le32 word1; + union { + __le64 addr; + struct { + __le32 pkt_len; + __le32 resvd; + } l; + } adrl; +} __packed; + +/* tpd word 1 */ +#define TPD_CXSUMSTART_MASK 0x00FF +#define TPD_CXSUMSTART_SHIFT 0 +#define TPD_L4HDROFFSET_MASK 0x00FF +#define TPD_L4HDROFFSET_SHIFT 0 +#define TPD_CXSUM_EN_MASK 0x0001 +#define TPD_CXSUM_EN_SHIFT 8 +#define TPD_IP_XSUM_MASK 0x0001 +#define TPD_IP_XSUM_SHIFT 9 +#define TPD_TCP_XSUM_MASK 0x0001 +#define TPD_TCP_XSUM_SHIFT 10 +#define TPD_UDP_XSUM_MASK 0x0001 +#define TPD_UDP_XSUM_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_V2_MASK 0x0001 +#define TPD_LSO_V2_SHIFT 13 +#define TPD_VLTAGGED_MASK 0x0001 +#define TPD_VLTAGGED_SHIFT 14 +#define TPD_INS_VLTAG_MASK 0x0001 +#define TPD_INS_VLTAG_SHIFT 15 +#define TPD_IPV4_MASK 0x0001 +#define TPD_IPV4_SHIFT 16 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 17 +#define TPD_CXSUMOFFSET_MASK 0x00FF +#define TPD_CXSUMOFFSET_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 18 +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK) + +/* Receive Free Descriptor */ +struct alx_rfd { + __le64 addr; /* data buffer address, length is + * declared in register --- every + * buffer has the same size + */ +} __packed; + +/* Receive Return Descriptor, contains 4 32-bit words. 
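+ * Word 0 holds the IP payload checksum plus the number and start index of the consumed RFDs, Word 1 the RSS hash, Word 2 the VLAN tag, protocol ID and RSS queue/algorithm, and Word 3 the packet length and error/status bits, as depicted below.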
+ * + * 31 16 0 + * +----------------+----------------+ + * | Word 0 | + * +----------------+----------------+ + * | Word 1: RSS Hash value | + * +----------------+----------------+ + * | Word 2 | + * +----------------+----------------+ + * | Word 3 | + * +----------------+----------------+ + * + * Word 0 depiction & Word 2 depiction: + * + * 0--+ 0--+ + * 1 | 1 | + * 2 | 2 | + * 3 | 3 | + * 4 | 4 | + * 5 | 5 | + * 6 | 6 | + * 7 | IP payload checksum 7 | VLAN tag + * 8 | (15:0) 8 | (15:0) + * 9 | 9 | + * 10 | 10 | + * 11 | 11 | + * 12 | 12 | + * 13 | 13 | + * 14 | 14 | + * 15-+ 15-+ + * 16-+ 16-+ + * 17 | Number of RFDs 17 | + * 18 | (19:16) 18 | + * 19-+ 19 | Protocol ID + * 20-+ 20 | (23:16) + * 21 | 21 | + * 22 | 22 | + * 23 | 23-+ + * 24 | 24 | Reserved + * 25 | Start index of RFD-ring 25-+ + * 26 | (31:20) 26 | RSS Q-num (27:25) + * 27 | 27-+ + * 28 | 28-+ + * 29 | 29 | RSS Hash algorithm + * 30 | 30 | (31:28) + * 31-+ 31-+ + * + * Word 3 depiction: + * + * 0--+ + * 1 | + * 2 | + * 3 | + * 4 | + * 5 | + * 6 | + * 7 | Packet length (include FCS) + * 8 | (13:0) + * 9 | + * 10 | + * 11 | + * 12 | + * 13-+ + * 14 L4 Header checksum error + * 15 IPv4 checksum error + * 16 VLAN tagged + * 17-+ + * 18 | Protocol ID (19:17) + * 19-+ + * 20 Receive error summary + * 21 FCS(CRC) error + * 22 Frame alignment error + * 23 Truncated packet + * 24 Runt packet + * 25 Incomplete packet due to insufficient rx-desc + * 26 Broadcast packet + * 27 Multicast packet + * 28 Ethernet type (EII or 802.3) + * 29 FIFO overflow + * 30 Length error (for 802.3, length field mismatch with actual len) + * 31 Updated, indicate to driver that this RRD is refreshed. + */ +struct alx_rrd { + __le32 word0; + __le32 rss_hash; + __le32 word2; + __le32 word3; +} __packed; + +/* rrd word 0 */ +#define RRD_XSUM_MASK 0xFFFF +#define RRD_XSUM_SHIFT 0 +#define RRD_NOR_MASK 0x000F +#define RRD_NOR_SHIFT 16 +#define RRD_SI_MASK 0x0FFF +#define RRD_SI_SHIFT 20 + +/* rrd word 2 */ +#define RRD_VLTAG_MASK 0xFFFF +#define RRD_VLTAG_SHIFT 0 +#define RRD_PID_MASK 0x00FF +#define RRD_PID_SHIFT 16 +/* non-ip packet */ +#define RRD_PID_NONIP 0 +/* ipv4(only) */ +#define RRD_PID_IPV4 1 +/* tcp/ipv6 */ +#define RRD_PID_IPV6TCP 2 +/* tcp/ipv4 */ +#define RRD_PID_IPV4TCP 3 +/* udp/ipv6 */ +#define RRD_PID_IPV6UDP 4 +/* udp/ipv4 */ +#define RRD_PID_IPV4UDP 5 +/* ipv6(only) */ +#define RRD_PID_IPV6 6 +/* LLDP packet */ +#define RRD_PID_LLDP 7 +/* 1588 packet */ +#define RRD_PID_1588 8 +#define RRD_RSSQ_MASK 0x0007 +#define RRD_RSSQ_SHIFT 25 +#define RRD_RSSALG_MASK 0x000F +#define RRD_RSSALG_SHIFT 28 +#define RRD_RSSALG_TCPV6 0x1 +#define RRD_RSSALG_IPV6 0x2 +#define RRD_RSSALG_TCPV4 0x4 +#define RRD_RSSALG_IPV4 0x8 + +/* rrd word 3 */ +#define RRD_PKTLEN_MASK 0x3FFF +#define RRD_PKTLEN_SHIFT 0 +#define RRD_ERR_L4_MASK 0x0001 +#define RRD_ERR_L4_SHIFT 14 +#define RRD_ERR_IPV4_MASK 0x0001 +#define RRD_ERR_IPV4_SHIFT 15 +#define RRD_VLTAGGED_MASK 0x0001 +#define RRD_VLTAGGED_SHIFT 16 +#define RRD_OLD_PID_MASK 0x0007 +#define RRD_OLD_PID_SHIFT 17 +#define RRD_ERR_RES_MASK 0x0001 +#define RRD_ERR_RES_SHIFT 20 +#define RRD_ERR_FCS_MASK 0x0001 +#define RRD_ERR_FCS_SHIFT 21 +#define RRD_ERR_FAE_MASK 0x0001 +#define RRD_ERR_FAE_SHIFT 22 +#define RRD_ERR_TRUNC_MASK 0x0001 +#define RRD_ERR_TRUNC_SHIFT 23 +#define RRD_ERR_RUNT_MASK 0x0001 +#define RRD_ERR_RUNT_SHIFT 24 +#define RRD_ERR_ICMP_MASK 0x0001 +#define RRD_ERR_ICMP_SHIFT 25 +#define RRD_BCAST_MASK 0x0001 +#define RRD_BCAST_SHIFT 26 +#define RRD_MCAST_MASK 0x0001 +#define RRD_MCAST_SHIFT 27 
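+/* Illustrative decode of word 3 of a returned RRD using the masks above
+ * (a sketch only; 'rrd' stands for any struct alx_rrd the hardware has written back):
+ *	u32 w3  = le32_to_cpu(rrd->word3);
+ *	u16 len = (w3 >> RRD_PKTLEN_SHIFT) & RRD_PKTLEN_MASK;   (includes FCS)
+ *	bool bad = (w3 >> RRD_ERR_RES_SHIFT) & RRD_ERR_RES_MASK;
+ * The RX path in main.c performs the same length extraction via ALX_GET_FIELD().
+ */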
+#define RRD_ETHTYPE_MASK 0x0001 +#define RRD_ETHTYPE_SHIFT 28 +#define RRD_ERR_FIFOV_MASK 0x0001 +#define RRD_ERR_FIFOV_SHIFT 29 +#define RRD_ERR_LEN_MASK 0x0001 +#define RRD_ERR_LEN_SHIFT 30 +#define RRD_UPDATED_MASK 0x0001 +#define RRD_UPDATED_SHIFT 31 + + +#define ALX_MAX_SETUP_LNK_CYCLE 50 + +/* for FlowControl */ +#define ALX_FC_RX 0x01 +#define ALX_FC_TX 0x02 +#define ALX_FC_ANEG 0x04 + +/* for sleep control */ +#define ALX_SLEEP_WOL_PHY 0x00000001 +#define ALX_SLEEP_WOL_MAGIC 0x00000002 +#define ALX_SLEEP_CIFS 0x00000004 +#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \ + ALX_SLEEP_WOL_MAGIC | \ + ALX_SLEEP_CIFS) + +/* for RSS hash type */ +#define ALX_RSS_HASH_TYPE_IPV4 0x1 +#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2 +#define ALX_RSS_HASH_TYPE_IPV6 0x4 +#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8 +#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \ + ALX_RSS_HASH_TYPE_IPV4_TCP | \ + ALX_RSS_HASH_TYPE_IPV6 | \ + ALX_RSS_HASH_TYPE_IPV6_TCP) +#define ALX_DEF_RXBUF_SIZE 1536 +#define ALX_MAX_JUMBO_PKT_SIZE (9*1024) +#define ALX_MAX_TSO_PKT_SIZE (7*1024) +#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE +#define ALX_MIN_FRAME_SIZE 68 +#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +#define ALX_MAX_RX_QUEUES 8 +#define ALX_MAX_TX_QUEUES 4 +#define ALX_MAX_HANDLED_INTRS 5 + +#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | \ + ALX_ISR_DMAR | \ + ALX_ISR_SMB | \ + ALX_ISR_MANU | \ + ALX_ISR_TIMER) + +#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | ALX_ISR_DMAR) + +#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \ + ALX_ISR_TXF_UR | \ + ALX_ISR_RFD_UR) + +#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \ + ALX_ISR_TX_Q1 | \ + ALX_ISR_TX_Q2 | \ + ALX_ISR_TX_Q3 | \ + ALX_ISR_RX_Q0 | \ + ALX_ISR_RX_Q1 | \ + ALX_ISR_RX_Q2 | \ + ALX_ISR_RX_Q3 | \ + ALX_ISR_RX_Q4 | \ + ALX_ISR_RX_Q5 | \ + ALX_ISR_RX_Q6 | \ + ALX_ISR_RX_Q7) + +/* maximum interrupt vectors for msix */ +#define ALX_MAX_MSIX_INTRS 16 + +#define ALX_GET_FIELD(_data, _field) \ + (((_data) >> _field ## _SHIFT) & _field ## _MASK) + +#define ALX_SET_FIELD(_data, _field, _value) do { \ + (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \ + (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\ + } while (0) + +struct alx_hw { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + + /* current & permanent mac addr */ + u8 mac_addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + u16 mtu; + u16 imt; + u8 dma_chnl; + u8 max_dma_chnl; + /* tpd threshold to trig INT */ + u32 ith_tpd; + u32 rx_ctrl; + u32 mc_hash[2]; + + u32 smb_timer; + /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */ + int link_speed; + + /* auto-neg advertisement or force mode config */ + u32 adv_cfg; + u8 flowctrl; + + u32 sleep_ctrl; + + spinlock_t mdio_lock; + struct mdio_if_info mdio; + u16 phy_id[2]; + + /* PHY link patch flag */ + bool lnk_patch; +}; + +static inline int alx_hw_revision(struct alx_hw *hw) +{ + return hw->pdev->revision >> ALX_PCI_REVID_SHIFT; +} + +static inline bool alx_hw_with_cr(struct alx_hw *hw) +{ + return hw->pdev->revision & 1; +} + +static inline bool alx_hw_giga(struct alx_hw *hw) +{ + return hw->pdev->device & 1; +} + +static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val) +{ + writeb(val, hw->hw_addr + reg); +} + +static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val) +{ + writew(val, hw->hw_addr + reg); +} + +static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg) +{ + return readw(hw->hw_addr + reg); +} + +static inline void alx_write_mem32(struct alx_hw *hw, 
u32 reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void alx_post_write(struct alx_hw *hw) +{ + readl(hw->hw_addr); +} + +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr); +void alx_reset_phy(struct alx_hw *hw); +void alx_reset_pcie(struct alx_hw *hw); +void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); +void alx_post_phy_link(struct alx_hw *hw); +int alx_pre_suspend(struct alx_hw *hw, int speed); +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); +int alx_get_phy_link(struct alx_hw *hw, int *speed); +int alx_clear_phy_intr(struct alx_hw *hw); +int alx_config_wol(struct alx_hw *hw); +void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc); +void alx_start_mac(struct alx_hw *hw); +int alx_reset_mac(struct alx_hw *hw); +void alx_set_macaddr(struct alx_hw *hw, const u8 *addr); +bool alx_phy_configured(struct alx_hw *hw); +void alx_configure_basic(struct alx_hw *hw); +void alx_disable_rss(struct alx_hw *hw); +int alx_select_powersaving_speed(struct alx_hw *hw, int *speed); +bool alx_get_phy_info(struct alx_hw *hw); + +#endif diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c new file mode 100644 index 000000000000..418de8b13165 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -0,0 +1,1625 @@ +/* + * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net> + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/if_vlan.h> +#include <linux/mdio.h> +#include <linux/aer.h> +#include <linux/bitops.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <net/ip6_checksum.h> +#include <linux/crc32.h> +#include "alx.h" +#include "hw.h" +#include "reg.h" + +const char alx_drv_name[] = "alx"; + + +static void alx_free_txbuf(struct alx_priv *alx, int entry) +{ + struct alx_buffer *txb = &alx->txq.bufs[entry]; + + if (dma_unmap_len(txb, size)) { + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(txb, dma), + dma_unmap_len(txb, size), + DMA_TO_DEVICE); + dma_unmap_len_set(txb, size, 0); + } + + if (txb->skb) { + dev_kfree_skb_any(txb->skb); + txb->skb = NULL; + } +} + +static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct sk_buff *skb; + struct alx_buffer *cur_buf; + dma_addr_t dma; + u16 cur, next, count = 0; + + next = cur = rxq->write_idx; + if (++next == alx->rx_ringsz) + next = 0; + cur_buf = &rxq->bufs[cur]; + + while (!cur_buf->skb && next != rxq->read_idx) { + struct alx_rfd *rfd = &rxq->rfd[cur]; + + skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + if (!skb) + break; + dma = dma_map_single(&alx->hw.pdev->dev, + skb->data, alx->rxbuf_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) { + dev_kfree_skb(skb); + break; + } + + /* Unfortunately, RX descriptor buffers must be 4-byte + * aligned, so we can't use IP alignment. + */ + if (WARN_ON(dma & 3)) { + dev_kfree_skb(skb); + break; + } + + cur_buf->skb = skb; + dma_unmap_len_set(cur_buf, size, alx->rxbuf_size); + dma_unmap_addr_set(cur_buf, dma, dma); + rfd->addr = cpu_to_le64(dma); + + cur = next; + if (++next == alx->rx_ringsz) + next = 0; + cur_buf = &rxq->bufs[cur]; + count++; + } + + if (count) { + /* flush all updates before updating hardware */ + wmb(); + rxq->write_idx = cur; + alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); + } + + return count; +} + +static inline int alx_tpd_avail(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + + if (txq->write_idx >= txq->read_idx) + return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1; + return txq->read_idx - txq->write_idx - 1; +} + +static bool alx_clean_tx_irq(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + u16 hw_read_idx, sw_read_idx; + unsigned int total_bytes = 0, total_packets = 0; + int budget = ALX_DEFAULT_TX_WORK; + + sw_read_idx = txq->read_idx; + hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX); + + if (sw_read_idx != hw_read_idx) { + while (sw_read_idx != hw_read_idx && budget > 0) { + struct sk_buff *skb; + + skb = txq->bufs[sw_read_idx].skb; + if (skb) { + total_bytes += skb->len; + total_packets++; + budget--; + } + + alx_free_txbuf(alx, sw_read_idx); + + if (++sw_read_idx == alx->tx_ringsz) + sw_read_idx = 0; + } + txq->read_idx = sw_read_idx; + + netdev_completed_queue(alx->dev, total_packets, total_bytes); + } + + if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) && + alx_tpd_avail(alx) > alx->tx_ringsz/4) + netif_wake_queue(alx->dev); + + return sw_read_idx == hw_read_idx; +} + +static void alx_schedule_link_check(struct alx_priv *alx) +{ + schedule_work(&alx->link_check_wk); +} + +static void alx_schedule_reset(struct alx_priv *alx) +{ + schedule_work(&alx->reset_wk); +} + +static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) +{ + struct 
alx_rx_queue *rxq = &alx->rxq; + struct alx_rrd *rrd; + struct alx_buffer *rxb; + struct sk_buff *skb; + u16 length, rfd_cleaned = 0; + + while (budget > 0) { + rrd = &rxq->rrd[rxq->rrd_read_idx]; + if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) + break; + rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT); + + if (ALX_GET_FIELD(le32_to_cpu(rrd->word0), + RRD_SI) != rxq->read_idx || + ALX_GET_FIELD(le32_to_cpu(rrd->word0), + RRD_NOR) != 1) { + alx_schedule_reset(alx); + return 0; + } + + rxb = &rxq->bufs[rxq->read_idx]; + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(rxb, dma), + dma_unmap_len(rxb, size), + DMA_FROM_DEVICE); + dma_unmap_len_set(rxb, size, 0); + skb = rxb->skb; + rxb->skb = NULL; + + if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) || + rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) { + rrd->word3 = 0; + dev_kfree_skb_any(skb); + goto next_pkt; + } + + length = ALX_GET_FIELD(le32_to_cpu(rrd->word3), + RRD_PKTLEN) - ETH_FCS_LEN; + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, alx->dev); + + skb_checksum_none_assert(skb); + if (alx->dev->features & NETIF_F_RXCSUM && + !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) | + cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) { + switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2), + RRD_PID)) { + case RRD_PID_IPV6UDP: + case RRD_PID_IPV4UDP: + case RRD_PID_IPV4TCP: + case RRD_PID_IPV6TCP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } + } + + napi_gro_receive(&alx->napi, skb); + budget--; + +next_pkt: + if (++rxq->read_idx == alx->rx_ringsz) + rxq->read_idx = 0; + if (++rxq->rrd_read_idx == alx->rx_ringsz) + rxq->rrd_read_idx = 0; + + if (++rfd_cleaned > ALX_RX_ALLOC_THRESH) + rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC); + } + + if (rfd_cleaned) + alx_refill_rx_ring(alx, GFP_ATOMIC); + + return budget > 0; +} + +static int alx_poll(struct napi_struct *napi, int budget) +{ + struct alx_priv *alx = container_of(napi, struct alx_priv, napi); + struct alx_hw *hw = &alx->hw; + bool complete = true; + unsigned long flags; + + complete = alx_clean_tx_irq(alx) && + alx_clean_rx_irq(alx, budget); + + if (!complete) + return 1; + + napi_complete(&alx->napi); + + /* enable interrupt */ + spin_lock_irqsave(&alx->irq_lock, flags); + alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + spin_unlock_irqrestore(&alx->irq_lock, flags); + + alx_post_write(hw); + + return 0; +} + +static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) +{ + struct alx_hw *hw = &alx->hw; + bool write_int_mask = false; + + spin_lock(&alx->irq_lock); + + /* ACK interrupt */ + alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS); + intr &= alx->int_mask; + + if (intr & ALX_ISR_FATAL) { + netif_warn(alx, hw, alx->dev, + "fatal interrupt 0x%x, resetting\n", intr); + alx_schedule_reset(alx); + goto out; + } + + if (intr & ALX_ISR_ALERT) + netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr); + + if (intr & ALX_ISR_PHY) { + /* suppress PHY interrupt, because the source + * is from PHY internal. only the internal status + * is cleared, the interrupt status could be cleared. 
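+		 * (ALX_ISR_PHY is masked in IMR here and re-enabled from alx_check_link() once alx_clear_phy_intr() has read the PHY's own interrupt status register)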
+ */ + alx->int_mask &= ~ALX_ISR_PHY; + write_int_mask = true; + alx_schedule_link_check(alx); + } + + if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) { + napi_schedule(&alx->napi); + /* mask rx/tx interrupt, enable them when napi complete */ + alx->int_mask &= ~ALX_ISR_ALL_QUEUES; + write_int_mask = true; + } + + if (write_int_mask) + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + + alx_write_mem32(hw, ALX_ISR, 0); + + out: + spin_unlock(&alx->irq_lock); + return IRQ_HANDLED; +} + +static irqreturn_t alx_intr_msi(int irq, void *data) +{ + struct alx_priv *alx = data; + + return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR)); +} + +static irqreturn_t alx_intr_legacy(int irq, void *data) +{ + struct alx_priv *alx = data; + struct alx_hw *hw = &alx->hw; + u32 intr; + + intr = alx_read_mem32(hw, ALX_ISR); + + if (intr & ALX_ISR_DIS || !(intr & alx->int_mask)) + return IRQ_NONE; + + return alx_intr_handle(alx, intr); +} + +static void alx_init_ring_ptrs(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + u32 addr_hi = ((u64)alx->descmem.dma) >> 32; + + alx->rxq.read_idx = 0; + alx->rxq.write_idx = 0; + alx->rxq.rrd_read_idx = 0; + alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi); + alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma); + alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz); + alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma); + alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz); + alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size); + + alx->txq.read_idx = 0; + alx->txq.write_idx = 0; + alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi); + alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma); + alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz); + + /* load these pointers into the chip */ + alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR); +} + +static void alx_free_txring_buf(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + int i; + + if (!txq->bufs) + return; + + for (i = 0; i < alx->tx_ringsz; i++) + alx_free_txbuf(alx, i); + + memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer)); + memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd)); + txq->write_idx = 0; + txq->read_idx = 0; + + netdev_reset_queue(alx->dev); +} + +static void alx_free_rxring_buf(struct alx_priv *alx) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct alx_buffer *cur_buf; + u16 i; + + if (rxq == NULL) + return; + + for (i = 0; i < alx->rx_ringsz; i++) { + cur_buf = rxq->bufs + i; + if (cur_buf->skb) { + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(cur_buf, dma), + dma_unmap_len(cur_buf, size), + DMA_FROM_DEVICE); + dev_kfree_skb(cur_buf->skb); + cur_buf->skb = NULL; + dma_unmap_len_set(cur_buf, size, 0); + dma_unmap_addr_set(cur_buf, dma, 0); + } + } + + rxq->write_idx = 0; + rxq->read_idx = 0; + rxq->rrd_read_idx = 0; +} + +static void alx_free_buffers(struct alx_priv *alx) +{ + alx_free_txring_buf(alx); + alx_free_rxring_buf(alx); +} + +static int alx_reinit_rings(struct alx_priv *alx) +{ + alx_free_buffers(alx); + + alx_init_ring_ptrs(alx); + + if (!alx_refill_rx_ring(alx, GFP_KERNEL)) + return -ENOMEM; + + return 0; +} + +static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash) +{ + u32 crc32, bit, reg; + + crc32 = ether_crc(ETH_ALEN, addr); + reg = (crc32 >> 31) & 0x1; + bit = (crc32 >> 26) & 0x1F; + + mc_hash[reg] |= BIT(bit); +} + +static void __alx_set_rx_mode(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + struct netdev_hw_addr *ha; + u32 
mc_hash[2] = {}; + + if (!(netdev->flags & IFF_ALLMULTI)) { + netdev_for_each_mc_addr(ha, netdev) + alx_add_mc_addr(hw, ha->addr, mc_hash); + + alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]); + alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]); + } + + hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); + if (netdev->flags & IFF_PROMISC) + hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; + if (netdev->flags & IFF_ALLMULTI) + hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +static void alx_set_rx_mode(struct net_device *netdev) +{ + __alx_set_rx_mode(netdev); +} + +static int alx_set_mac_address(struct net_device *netdev, void *data) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + struct sockaddr *addr = data; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netdev->addr_assign_type & NET_ADDR_RANDOM) + netdev->addr_assign_type ^= NET_ADDR_RANDOM; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + alx_set_macaddr(hw, hw->mac_addr); + + return 0; +} + +static int alx_alloc_descriptors(struct alx_priv *alx) +{ + alx->txq.bufs = kcalloc(alx->tx_ringsz, + sizeof(struct alx_buffer), + GFP_KERNEL); + if (!alx->txq.bufs) + return -ENOMEM; + + alx->rxq.bufs = kcalloc(alx->rx_ringsz, + sizeof(struct alx_buffer), + GFP_KERNEL); + if (!alx->rxq.bufs) + goto out_free; + + /* physical tx/rx ring descriptors + * + * Allocate them as a single chunk because they must not cross a + * 4G boundary (hardware has a single register for high 32 bits + * of addresses only) + */ + alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz + + sizeof(struct alx_rfd) * alx->rx_ringsz; + alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, + alx->descmem.size, + &alx->descmem.dma, + GFP_KERNEL); + if (!alx->descmem.virt) + goto out_free; + + alx->txq.tpd = (void *)alx->descmem.virt; + alx->txq.tpd_dma = alx->descmem.dma; + + /* alignment requirement for next block */ + BUILD_BUG_ON(sizeof(struct alx_txd) % 8); + + alx->rxq.rrd = + (void *)((u8 *)alx->descmem.virt + + sizeof(struct alx_txd) * alx->tx_ringsz); + alx->rxq.rrd_dma = alx->descmem.dma + + sizeof(struct alx_txd) * alx->tx_ringsz; + + /* alignment requirement for next block */ + BUILD_BUG_ON(sizeof(struct alx_rrd) % 8); + + alx->rxq.rfd = + (void *)((u8 *)alx->descmem.virt + + sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz); + alx->rxq.rfd_dma = alx->descmem.dma + + sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz; + + return 0; +out_free: + kfree(alx->txq.bufs); + kfree(alx->rxq.bufs); + return -ENOMEM; +} + +static int alx_alloc_rings(struct alx_priv *alx) +{ + int err; + + err = alx_alloc_descriptors(alx); + if (err) + return err; + + alx->int_mask &= ~ALX_ISR_ALL_QUEUES; + alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; + alx->tx_ringsz = alx->tx_ringsz; + + netif_napi_add(alx->dev, &alx->napi, alx_poll, 64); + + alx_reinit_rings(alx); + return 0; +} + +static void alx_free_rings(struct alx_priv *alx) +{ + netif_napi_del(&alx->napi); + alx_free_buffers(alx); + + kfree(alx->txq.bufs); + kfree(alx->rxq.bufs); + + dma_free_coherent(&alx->hw.pdev->dev, + alx->descmem.size, + alx->descmem.virt, + alx->descmem.dma); +} + +static void alx_config_vector_mapping(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_write_mem32(hw, 
ALX_MSI_MAP_TBL1, 0); + alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0); + alx_write_mem32(hw, ALX_MSI_ID_MAP, 0); +} + +static void alx_irq_enable(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + /* level-1 interrupt switch */ + alx_write_mem32(hw, ALX_ISR, 0); + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + alx_post_write(hw); +} + +static void alx_irq_disable(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); + alx_write_mem32(hw, ALX_IMR, 0); + alx_post_write(hw); + + synchronize_irq(alx->hw.pdev->irq); +} + +static int alx_request_irq(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + struct alx_hw *hw = &alx->hw; + int err; + u32 msi_ctrl; + + msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT; + + if (!pci_enable_msi(alx->hw.pdev)) { + alx->msi = true; + + alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, + msi_ctrl | ALX_MSI_MASK_SEL_LINE); + err = request_irq(pdev->irq, alx_intr_msi, 0, + alx->dev->name, alx); + if (!err) + goto out; + /* fall back to legacy interrupt */ + pci_disable_msi(alx->hw.pdev); + } + + alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0); + err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED, + alx->dev->name, alx); +out: + if (!err) + alx_config_vector_mapping(alx); + return err; +} + +static void alx_free_irq(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + + free_irq(pdev->irq, alx); + + if (alx->msi) { + pci_disable_msi(alx->hw.pdev); + alx->msi = false; + } +} + +static int alx_identify_hw(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + int rev = alx_hw_revision(hw); + + if (rev > ALX_REV_C0) + return -EINVAL; + + hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2; + + return 0; +} + +static int alx_init_sw(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + struct alx_hw *hw = &alx->hw; + int err; + + err = alx_identify_hw(alx); + if (err) { + dev_err(&pdev->dev, "unrecognized chip, aborting\n"); + return err; + } + + alx->hw.lnk_patch = + pdev->device == ALX_DEV_ID_AR8161 && + pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC && + pdev->subsystem_device == 0x0091 && + pdev->revision == 0; + + hw->smb_timer = 400; + hw->mtu = alx->dev->mtu; + alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); + alx->tx_ringsz = 256; + alx->rx_ringsz = 512; + hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY; + hw->imt = 200; + alx->int_mask = ALX_ISR_MISC; + hw->dma_chnl = hw->max_dma_chnl; + hw->ith_tpd = alx->tx_ringsz / 3; + hw->link_speed = SPEED_UNKNOWN; + hw->adv_cfg = ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_1000baseT_Full; + hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; + + hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN | + ALX_MAC_CTRL_MHASH_ALG_HI5B | + ALX_MAC_CTRL_BRD_EN | + ALX_MAC_CTRL_PCRCE | + ALX_MAC_CTRL_CRCE | + ALX_MAC_CTRL_RXFC_EN | + ALX_MAC_CTRL_TXFC_EN | + 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT; + + return err; +} + + +static netdev_features_t alx_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return features; +} + +static void alx_netif_stop(struct alx_priv *alx) +{ + alx->dev->trans_start = jiffies; + if (netif_carrier_ok(alx->dev)) { + netif_carrier_off(alx->dev); + netif_tx_disable(alx->dev); + napi_disable(&alx->napi); + } +} + +static void alx_halt(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + 
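+	/* quiesce the data path (carrier, TX queues, NAPI) before resetting the MAC and disabling interrupts */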
alx_netif_stop(alx); + hw->link_speed = SPEED_UNKNOWN; + + alx_reset_mac(hw); + + /* disable l0s/l1 */ + alx_enable_aspm(hw, false, false); + alx_irq_disable(alx); + alx_free_buffers(alx); +} + +static void alx_configure(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_configure_basic(hw); + alx_disable_rss(hw); + __alx_set_rx_mode(alx->dev); + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +static void alx_activate(struct alx_priv *alx) +{ + /* hardware setting lost, restore it */ + alx_reinit_rings(alx); + alx_configure(alx); + + /* clear old interrupts */ + alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); + + alx_irq_enable(alx); + + alx_schedule_link_check(alx); +} + +static void alx_reinit(struct alx_priv *alx) +{ + ASSERT_RTNL(); + + alx_halt(alx); + alx_activate(alx); +} + +static int alx_change_mtu(struct net_device *netdev, int mtu) +{ + struct alx_priv *alx = netdev_priv(netdev); + int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ALX_MIN_FRAME_SIZE) || + (max_frame > ALX_MAX_FRAME_SIZE)) + return -EINVAL; + + if (netdev->mtu == mtu) + return 0; + + netdev->mtu = mtu; + alx->hw.mtu = mtu; + alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? + ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; + netdev_update_features(netdev); + if (netif_running(netdev)) + alx_reinit(alx); + return 0; +} + +static void alx_netif_start(struct alx_priv *alx) +{ + netif_tx_wake_all_queues(alx->dev); + napi_enable(&alx->napi); + netif_carrier_on(alx->dev); +} + +static int __alx_open(struct alx_priv *alx, bool resume) +{ + int err; + + if (!resume) + netif_carrier_off(alx->dev); + + err = alx_alloc_rings(alx); + if (err) + return err; + + alx_configure(alx); + + err = alx_request_irq(alx); + if (err) + goto out_free_rings; + + /* clear old interrupts */ + alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); + + alx_irq_enable(alx); + + if (!resume) + netif_tx_start_all_queues(alx->dev); + + alx_schedule_link_check(alx); + return 0; + +out_free_rings: + alx_free_rings(alx); + return err; +} + +static void __alx_stop(struct alx_priv *alx) +{ + alx_halt(alx); + alx_free_irq(alx); + alx_free_rings(alx); +} + +static const char *alx_speed_desc(u16 speed) +{ + switch (speed) { + case SPEED_1000 + DUPLEX_FULL: + return "1 Gbps Full"; + case SPEED_100 + DUPLEX_FULL: + return "100 Mbps Full"; + case SPEED_100 + DUPLEX_HALF: + return "100 Mbps Half"; + case SPEED_10 + DUPLEX_FULL: + return "10 Mbps Full"; + case SPEED_10 + DUPLEX_HALF: + return "10 Mbps Half"; + default: + return "Unknown speed"; + } +} + +static void alx_check_link(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + unsigned long flags; + int speed, old_speed; + int err; + + /* clear PHY internal interrupt status, otherwise the main + * interrupt status will be asserted forever + */ + alx_clear_phy_intr(hw); + + err = alx_get_phy_link(hw, &speed); + if (err < 0) + goto reset; + + spin_lock_irqsave(&alx->irq_lock, flags); + alx->int_mask |= ALX_ISR_PHY; + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + spin_unlock_irqrestore(&alx->irq_lock, flags); + + old_speed = hw->link_speed; + + if (old_speed == speed) + return; + hw->link_speed = speed; + + if (speed != SPEED_UNKNOWN) { + netif_info(alx, link, alx->dev, + "NIC Up: %s\n", alx_speed_desc(speed)); + alx_post_phy_link(hw); + alx_enable_aspm(hw, true, true); + alx_start_mac(hw); + + if (old_speed == SPEED_UNKNOWN) + alx_netif_start(alx); + } else { + /* link is now down */ + alx_netif_stop(alx); + netif_info(alx, link, alx->dev, "Link 
Down\n"); + err = alx_reset_mac(hw); + if (err) + goto reset; + alx_irq_disable(alx); + + /* MAC reset causes all HW settings to be lost, restore all */ + err = alx_reinit_rings(alx); + if (err) + goto reset; + alx_configure(alx); + alx_enable_aspm(hw, false, true); + alx_post_phy_link(hw); + alx_irq_enable(alx); + } + + return; + +reset: + alx_schedule_reset(alx); +} + +static int alx_open(struct net_device *netdev) +{ + return __alx_open(netdev_priv(netdev), false); +} + +static int alx_stop(struct net_device *netdev) +{ + __alx_stop(netdev_priv(netdev)); + return 0; +} + +static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + struct alx_hw *hw = &alx->hw; + int err, speed; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __alx_stop(alx); + +#ifdef CONFIG_PM_SLEEP + err = pci_save_state(pdev); + if (err) + return err; +#endif + + err = alx_select_powersaving_speed(hw, &speed); + if (err) + return err; + err = alx_clear_phy_intr(hw); + if (err) + return err; + err = alx_pre_suspend(hw, speed); + if (err) + return err; + err = alx_config_wol(hw); + if (err) + return err; + + *wol_en = false; + if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) { + netif_info(alx, wol, netdev, + "wol: ctrl=%X, speed=%X\n", + hw->sleep_ctrl, speed); + device_set_wakeup_enable(&pdev->dev, true); + *wol_en = true; + } + + pci_disable_device(pdev); + + return 0; +} + +static void alx_shutdown(struct pci_dev *pdev) +{ + int err; + bool wol_en; + + err = __alx_shutdown(pdev, &wol_en); + if (!err) { + pci_wake_from_d3(pdev, wol_en); + pci_set_power_state(pdev, PCI_D3hot); + } else { + dev_err(&pdev->dev, "shutdown fail %d\n", err); + } +} + +static void alx_link_check(struct work_struct *work) +{ + struct alx_priv *alx; + + alx = container_of(work, struct alx_priv, link_check_wk); + + rtnl_lock(); + alx_check_link(alx); + rtnl_unlock(); +} + +static void alx_reset(struct work_struct *work) +{ + struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk); + + rtnl_lock(); + alx_reinit(alx); + rtnl_unlock(); +} + +static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) +{ + u8 cso, css; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + cso = skb_checksum_start_offset(skb); + if (cso & 1) + return -EINVAL; + + css = cso + skb->csum_offset; + first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT); + first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT); + first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT); + + return 0; +} + +static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) +{ + struct alx_tx_queue *txq = &alx->txq; + struct alx_txd *tpd, *first_tpd; + dma_addr_t dma; + int maplen, f, first_idx = txq->write_idx; + + first_tpd = &txq->tpd[txq->write_idx]; + tpd = first_tpd; + + maplen = skb_headlen(skb); + dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen, + DMA_TO_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) + goto err_dma; + + dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); + dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); + + tpd->adrl.addr = cpu_to_le64(dma); + tpd->len = cpu_to_le16(maplen); + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + tpd = &txq->tpd[txq->write_idx]; + + tpd->word1 = first_tpd->word1; + + maplen = skb_frag_size(frag); + dma = 
skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0, + maplen, DMA_TO_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) + goto err_dma; + dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); + dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); + + tpd->adrl.addr = cpu_to_le64(dma); + tpd->len = cpu_to_le16(maplen); + } + + /* last TPD, set EOP flag and store skb */ + tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT); + txq->bufs[txq->write_idx].skb = skb; + + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + + return 0; + +err_dma: + f = first_idx; + while (f != txq->write_idx) { + alx_free_txbuf(alx, f); + if (++f == alx->tx_ringsz) + f = 0; + } + return -ENOMEM; +} + +static netdev_tx_t alx_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_tx_queue *txq = &alx->txq; + struct alx_txd *first; + int tpdreq = skb_shinfo(skb)->nr_frags + 1; + + if (alx_tpd_avail(alx) < tpdreq) { + netif_stop_queue(alx->dev); + goto drop; + } + + first = &txq->tpd[txq->write_idx]; + memset(first, 0, sizeof(*first)); + + if (alx_tx_csum(skb, first)) + goto drop; + + if (alx_map_tx_skb(alx, skb) < 0) + goto drop; + + netdev_sent_queue(alx->dev, skb->len); + + /* flush updates before updating hardware */ + wmb(); + alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx); + + if (alx_tpd_avail(alx) < alx->tx_ringsz/8) + netif_stop_queue(alx->dev); + + return NETDEV_TX_OK; + +drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void alx_tx_timeout(struct net_device *dev) +{ + struct alx_priv *alx = netdev_priv(dev); + + alx_schedule_reset(alx); +} + +static int alx_mdio_read(struct net_device *netdev, + int prtad, int devad, u16 addr) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + u16 val; + int err; + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad == MDIO_DEVAD_NONE) + err = alx_read_phy_reg(hw, addr, &val); + else + err = alx_read_phy_ext(hw, devad, addr, &val); + + if (err) + return err; + return val; +} + +static int alx_mdio_write(struct net_device *netdev, + int prtad, int devad, u16 addr, u16 val) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad == MDIO_DEVAD_NONE) + return alx_write_phy_reg(hw, addr, val); + + return alx_write_phy_ext(hw, devad, addr, val); +} + +static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void alx_poll_controller(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + + if (alx->msi) + alx_intr_msi(0, alx); + else + alx_intr_legacy(0, alx); +} +#endif + +static const struct net_device_ops alx_netdev_ops = { + .ndo_open = alx_open, + .ndo_stop = alx_stop, + .ndo_start_xmit = alx_start_xmit, + .ndo_set_rx_mode = alx_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = alx_set_mac_address, + .ndo_change_mtu = alx_change_mtu, + .ndo_do_ioctl = alx_ioctl, + .ndo_tx_timeout = alx_tx_timeout, + .ndo_fix_features = alx_fix_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = alx_poll_controller, +#endif +}; + +static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct alx_priv 
*alx; + struct alx_hw *hw; + bool phy_configured; + int bars, pm_cap, err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* The alx chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used for descriptors. + */ + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA config, aborting\n"); + goto out_pci_disable; + } + } + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions(pdev, bars, alx_drv_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed(bars:%d)\n", bars); + goto out_pci_disable; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap == 0) { + dev_err(&pdev->dev, + "Can't find power management capability, aborting\n"); + err = -EIO; + goto out_pci_release; + } + + err = pci_set_power_state(pdev, PCI_D0); + if (err) + goto out_pci_release; + + netdev = alloc_etherdev(sizeof(*alx)); + if (!netdev) { + err = -ENOMEM; + goto out_pci_release; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + alx = netdev_priv(netdev); + alx->dev = netdev; + alx->hw.pdev = pdev; + alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | + NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL; + hw = &alx->hw; + pci_set_drvdata(pdev, alx); + + hw->hw_addr = pci_ioremap_bar(pdev, 0); + if (!hw->hw_addr) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -EIO; + goto out_free_netdev; + } + + netdev->netdev_ops = &alx_netdev_ops; + SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops); + netdev->irq = pdev->irq; + netdev->watchdog_timeo = ALX_WATCHDOG_TIME; + + if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) + pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; + + err = alx_init_sw(alx); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto out_unmap; + } + + alx_reset_pcie(hw); + + phy_configured = alx_phy_configured(hw); + + if (!phy_configured) + alx_reset_phy(hw); + + err = alx_reset_mac(hw); + if (err) { + dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); + goto out_unmap; + } + + /* setup link to put it in a known good starting state */ + if (!phy_configured) { + err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); + if (err) { + dev_err(&pdev->dev, + "failed to configure PHY speed/duplex (err=%d)\n", + err); + goto out_unmap; + } + } + + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + + if (alx_get_perm_macaddr(hw, hw->perm_addr)) { + dev_warn(&pdev->dev, + "Invalid permanent address programmed, using random one\n"); + eth_hw_addr_random(netdev); + memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); + } + + memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); + memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); + memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); + + hw->mdio.prtad = 0; + hw->mdio.mmds = 0; + hw->mdio.dev = netdev; + hw->mdio.mode_support = MDIO_SUPPORTS_C45 | + MDIO_SUPPORTS_C22 | + MDIO_EMULATE_C22; + hw->mdio.mdio_read = alx_mdio_read; + hw->mdio.mdio_write = alx_mdio_write; + + if (!alx_get_phy_info(hw)) { + dev_err(&pdev->dev, "failed 
to identify PHY\n"); + err = -EIO; + goto out_unmap; + } + + INIT_WORK(&alx->link_check_wk, alx_link_check); + INIT_WORK(&alx->reset_wk, alx_reset); + spin_lock_init(&alx->hw.mdio_lock); + spin_lock_init(&alx->irq_lock); + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto out_unmap; + } + + device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl); + + netdev_info(netdev, + "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", + netdev->dev_addr); + + return 0; + +out_unmap: + iounmap(hw->hw_addr); +out_free_netdev: + free_netdev(netdev); +out_pci_release: + pci_release_selected_regions(pdev, bars); +out_pci_disable: + pci_disable_device(pdev); + return err; +} + +static void alx_remove(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + /* restore permanent mac address */ + alx_set_macaddr(hw, hw->perm_addr); + + unregister_netdev(alx->dev); + iounmap(hw->hw_addr); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + free_netdev(alx->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int alx_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int err; + bool wol_en; + + err = __alx_shutdown(pdev, &wol_en); + if (err) { + dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err); + return err; + } + + if (wol_en) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int alx_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + struct alx_hw *hw = &alx->hw; + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + hw->link_speed = SPEED_UNKNOWN; + alx->int_mask = ALX_ISR_MISC; + + alx_reset_pcie(hw); + alx_reset_phy(hw); + + err = alx_reset_mac(hw); + if (err) { + netif_err(alx, hw, alx->dev, + "resume:reset_mac fail %d\n", err); + return -EIO; + } + + err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); + if (err) { + netif_err(alx, hw, alx->dev, + "resume:setup_speed_duplex fail %d\n", err); + return -EIO; + } + + if (netif_running(netdev)) { + err = __alx_open(alx, true); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; +} +#endif + +static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET; + + dev_info(&pdev->dev, "pci error detected\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + netif_device_detach(netdev); + alx_halt(alx); + } + + if (state == pci_channel_io_perm_failure) + rc = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return rc; +} + +static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + dev_info(&pdev->dev, "pci error slot reset\n"); + + rtnl_lock(); + + if (pci_enable_device(pdev)) 
{ + dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); + goto out; + } + + pci_set_master(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + alx_reset_pcie(hw); + if (!alx_reset_mac(hw)) + rc = PCI_ERS_RESULT_RECOVERED; +out: + pci_cleanup_aer_uncorrect_error_status(pdev); + + rtnl_unlock(); + + return rc; +} + +static void alx_pci_error_resume(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + + dev_info(&pdev->dev, "pci error resume\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + alx_activate(alx); + netif_device_attach(netdev); + } + + rtnl_unlock(); +} + +static const struct pci_error_handlers alx_err_handlers = { + .error_detected = alx_pci_error_detected, + .slot_reset = alx_pci_error_slot_reset, + .resume = alx_pci_error_resume, +}; + +#ifdef CONFIG_PM_SLEEP +static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +#define ALX_PM_OPS (&alx_pm_ops) +#else +#define ALX_PM_OPS NULL +#endif + +static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) }, + {} +}; + +static struct pci_driver alx_driver = { + .name = alx_drv_name, + .id_table = alx_pci_tbl, + .probe = alx_probe, + .remove = alx_remove, + .shutdown = alx_shutdown, + .err_handler = &alx_err_handlers, + .driver.pm = ALX_PM_OPS, +}; + +module_pci_driver(alx_driver); +MODULE_DEVICE_TABLE(pci, alx_pci_tbl); +MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); +MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>"); +MODULE_DESCRIPTION( + "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h new file mode 100644 index 000000000000..e4358c98bc4e --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -0,0 +1,810 @@ +/* + * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net> + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ALX_REG_H +#define ALX_REG_H + +#define ALX_DEV_ID_AR8161 0x1091 +#define ALX_DEV_ID_E2200 0xe091 +#define ALX_DEV_ID_AR8162 0x1090 +#define ALX_DEV_ID_AR8171 0x10A1 +#define ALX_DEV_ID_AR8172 0x10A0 + +/* rev definition, + * bit(0): with xD support + * bit(1): with Card Reader function + * bit(7:2): real revision + */ +#define ALX_PCI_REVID_SHIFT 3 +#define ALX_REV_A0 0 +#define ALX_REV_A1 1 +#define ALX_REV_B0 2 +#define ALX_REV_C0 3 + +#define ALX_DEV_CTRL 0x0060 +#define ALX_DEV_CTRL_MAXRRS_MIN 2 + +#define ALX_MSIX_MASK 0x0090 + +#define ALX_UE_SVRT 0x010C +#define ALX_UE_SVRT_FCPROTERR BIT(13) +#define ALX_UE_SVRT_DLPROTERR BIT(4) + +/* eeprom & flash load register */ +#define ALX_EFLD 0x0204 +#define ALX_EFLD_F_EXIST BIT(10) +#define ALX_EFLD_E_EXIST BIT(9) +#define ALX_EFLD_STAT BIT(5) +#define ALX_EFLD_START BIT(0) + +/* eFuse load register */ +#define ALX_SLD 0x0218 +#define ALX_SLD_STAT BIT(12) +#define ALX_SLD_START BIT(11) +#define ALX_SLD_MAX_TO 100 + +#define ALX_PDLL_TRNS1 0x1104 +#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11) + +#define ALX_PMCTRL 0x12F8 +#define ALX_PMCTRL_HOTRST_WTEN BIT(31) +/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */ +#define ALX_PMCTRL_ASPM_FCEN BIT(30) +#define ALX_PMCTRL_SADLY_EN BIT(29) +#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF +#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24 +#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC +/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */ +#define ALX_PMCTRL_L1REQ_TO_MASK 0xF +#define ALX_PMCTRL_L1REQ_TO_SHIFT 20 +#define ALX_PMCTRL_L1REG_TO_DEF 0xF +#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19) +#define ALX_PMCTRL_L1_TIMER_MASK 0x7 +#define ALX_PMCTRL_L1_TIMER_SHIFT 16 +#define ALX_PMCTRL_L1_TIMER_16US 4 +#define ALX_PMCTRL_RCVR_WT_1US BIT(15) +/* bit13: enable pcie clk switch in L1 state */ +#define ALX_PMCTRL_L1_CLKSW_EN BIT(13) +#define ALX_PMCTRL_L0S_EN BIT(12) +#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11) +#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7) +/* bit6: power down serdes RX */ +#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6) +#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5) +#define ALX_PMCTRL_L1_SRDS_EN BIT(4) +#define ALX_PMCTRL_L1_EN BIT(3) + +/*******************************************************/ +/* following registers are mapped only to memory space */ +/*******************************************************/ + +#define ALX_MASTER 0x1400 +/* bit12: 1:alwys select pclk from serdes, not sw to 25M */ +#define ALX_MASTER_PCLKSEL_SRDS BIT(12) +/* bit11: irq moduration for rx */ +#define ALX_MASTER_IRQMOD2_EN BIT(11) +/* bit10: irq moduration for tx/rx */ +#define ALX_MASTER_IRQMOD1_EN BIT(10) +#define ALX_MASTER_SYSALVTIMER_EN BIT(7) +#define ALX_MASTER_OOB_DIS BIT(6) +/* bit5: wakeup without pcie clk */ +#define ALX_MASTER_WAKEN_25M BIT(5) +/* bit0: MAC & DMA reset */ +#define ALX_MASTER_DMA_MAC_RST BIT(0) +#define ALX_DMA_MAC_RST_TO 50 + +#define ALX_IRQ_MODU_TIMER 0x1408 +#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF +#define ALX_IRQ_MODU_TIMER1_SHIFT 0 + +#define ALX_PHY_CTRL 
0x140C +#define ALX_PHY_CTRL_100AB_EN BIT(17) +/* bit14: affect MAC & PHY, go to low power sts */ +#define ALX_PHY_CTRL_POWER_DOWN BIT(14) +/* bit13: 1:pll always ON, 0:can switch in lpw */ +#define ALX_PHY_CTRL_PLL_ON BIT(13) +#define ALX_PHY_CTRL_RST_ANALOG BIT(12) +#define ALX_PHY_CTRL_HIB_PULSE BIT(11) +#define ALX_PHY_CTRL_HIB_EN BIT(10) +#define ALX_PHY_CTRL_IDDQ BIT(7) +#define ALX_PHY_CTRL_GATE_25M BIT(5) +#define ALX_PHY_CTRL_LED_MODE BIT(2) +/* bit0: out of dsp RST state */ +#define ALX_PHY_CTRL_DSPRST_OUT BIT(0) +#define ALX_PHY_CTRL_DSPRST_TO 80 +#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \ + ALX_PHY_CTRL_100AB_EN | \ + ALX_PHY_CTRL_PLL_ON) + +#define ALX_MAC_STS 0x1410 +#define ALX_MAC_STS_TXQ_BUSY BIT(3) +#define ALX_MAC_STS_RXQ_BUSY BIT(2) +#define ALX_MAC_STS_TXMAC_BUSY BIT(1) +#define ALX_MAC_STS_RXMAC_BUSY BIT(0) +#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \ + ALX_MAC_STS_RXQ_BUSY | \ + ALX_MAC_STS_TXMAC_BUSY | \ + ALX_MAC_STS_RXMAC_BUSY) + +#define ALX_MDIO 0x1414 +#define ALX_MDIO_MODE_EXT BIT(30) +#define ALX_MDIO_BUSY BIT(27) +#define ALX_MDIO_CLK_SEL_MASK 0x7 +#define ALX_MDIO_CLK_SEL_SHIFT 24 +#define ALX_MDIO_CLK_SEL_25MD4 0 +#define ALX_MDIO_CLK_SEL_25MD128 7 +#define ALX_MDIO_START BIT(23) +#define ALX_MDIO_SPRES_PRMBL BIT(22) +/* bit21: 1:read,0:write */ +#define ALX_MDIO_OP_READ BIT(21) +#define ALX_MDIO_REG_MASK 0x1F +#define ALX_MDIO_REG_SHIFT 16 +#define ALX_MDIO_DATA_MASK 0xFFFF +#define ALX_MDIO_DATA_SHIFT 0 +#define ALX_MDIO_MAX_AC_TO 120 + +#define ALX_MDIO_EXTN 0x1448 +#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F +#define ALX_MDIO_EXTN_DEVAD_SHIFT 16 +#define ALX_MDIO_EXTN_REG_MASK 0xFFFF +#define ALX_MDIO_EXTN_REG_SHIFT 0 + +#define ALX_SERDES 0x1424 +#define ALX_SERDES_PHYCLK_SLWDWN BIT(18) +#define ALX_SERDES_MACCLK_SLWDWN BIT(17) + +#define ALX_LPI_CTRL 0x1440 +#define ALX_LPI_CTRL_EN BIT(0) + +/* for B0+, bit[13..] 
for C0+ */ +#define ALX_HRTBT_EXT_CTRL 0x1AD0 +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24 +#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13) +#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12) +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4 +#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3) +#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2) +#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1) +#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0) + +#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4 +#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478 +#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8 +#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC +#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0 +#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4 + +/* 1B8C ~ 1B94 for C0+ */ +#define ALX_SWOI_ACER_CTRL 0x1B8C +#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20) +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12 +#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF +#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_2 0x1B90 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_3 0x1B94 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0 + +/* for B0 */ +#define ALX_IDLE_DECISN_TIMER 0x1474 +/* 1ms */ +#define ALX_IDLE_DECISN_TIMER_DEF 0x400 + +#define ALX_MAC_CTRL 0x1480 +#define ALX_MAC_CTRL_FAST_PAUSE BIT(31) +#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30) +/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/ +#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29) +#define ALX_MAC_CTRL_BRD_EN BIT(26) +#define ALX_MAC_CTRL_MULTIALL_EN BIT(25) +#define ALX_MAC_CTRL_SPEED_MASK 0x3 +#define ALX_MAC_CTRL_SPEED_SHIFT 20 +#define ALX_MAC_CTRL_SPEED_10_100 1 +#define ALX_MAC_CTRL_SPEED_1000 2 +#define ALX_MAC_CTRL_PROMISC_EN BIT(15) +#define ALX_MAC_CTRL_VLANSTRIP BIT(14) +#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF +#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10 +#define ALX_MAC_CTRL_PCRCE BIT(7) +#define ALX_MAC_CTRL_CRCE BIT(6) +#define ALX_MAC_CTRL_FULLD BIT(5) +#define ALX_MAC_CTRL_RXFC_EN BIT(3) +#define ALX_MAC_CTRL_TXFC_EN BIT(2) +#define ALX_MAC_CTRL_RX_EN BIT(1) +#define ALX_MAC_CTRL_TX_EN BIT(0) + +#define ALX_STAD0 0x1488 +#define ALX_STAD1 0x148C + +#define ALX_HASH_TBL0 0x1490 +#define ALX_HASH_TBL1 0x1494 + +#define ALX_MTU 0x149C +#define ALX_MTU_JUMBO_TH 1514 +#define ALX_MTU_STD_ALGN 1536 + +#define ALX_SRAM5 0x1524 +#define ALX_SRAM_RXF_LEN_MASK 0xFFF +#define ALX_SRAM_RXF_LEN_SHIFT 0 
+#define ALX_SRAM_RXF_LEN_8K (8*1024) + +#define ALX_SRAM9 0x1534 +#define ALX_SRAM_LOAD_PTR BIT(0) + +#define ALX_RX_BASE_ADDR_HI 0x1540 + +#define ALX_TX_BASE_ADDR_HI 0x1544 + +#define ALX_RFD_ADDR_LO 0x1550 +#define ALX_RFD_RING_SZ 0x1560 +#define ALX_RFD_BUF_SZ 0x1564 + +#define ALX_RRD_ADDR_LO 0x1568 +#define ALX_RRD_RING_SZ 0x1578 + +/* pri3: highest, pri0: lowest */ +#define ALX_TPD_PRI3_ADDR_LO 0x14E4 +#define ALX_TPD_PRI2_ADDR_LO 0x14E0 +#define ALX_TPD_PRI1_ADDR_LO 0x157C +#define ALX_TPD_PRI0_ADDR_LO 0x1580 + +/* producer index is 16bit */ +#define ALX_TPD_PRI3_PIDX 0x1618 +#define ALX_TPD_PRI2_PIDX 0x161A +#define ALX_TPD_PRI1_PIDX 0x15F0 +#define ALX_TPD_PRI0_PIDX 0x15F2 + +/* consumer index is 16bit */ +#define ALX_TPD_PRI3_CIDX 0x161C +#define ALX_TPD_PRI2_CIDX 0x161E +#define ALX_TPD_PRI1_CIDX 0x15F4 +#define ALX_TPD_PRI0_CIDX 0x15F6 + +#define ALX_TPD_RING_SZ 0x1584 + +#define ALX_TXQ0 0x1590 +#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF +#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16 +#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200 +#define ALX_TXQ0_LSO_8023_EN BIT(7) +#define ALX_TXQ0_MODE_ENHANCE BIT(6) +#define ALX_TXQ0_EN BIT(5) +#define ALX_TXQ0_SUPT_IPOPT BIT(4) +#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF +#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0 +#define ALX_TXQ_TPD_BURSTPREF_DEF 5 + +#define ALX_TXQ1 0x1594 +/* bit11: drop large packet, len > (rfd buf) */ +#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11) +#define ALX_TXQ1_JUMBO_TSO_TH (7*1024) + +#define ALX_RXQ0 0x15A0 +#define ALX_RXQ0_EN BIT(31) +#define ALX_RXQ0_RSS_HASH_EN BIT(29) +#define ALX_RXQ0_RSS_MODE_MASK 0x3 +#define ALX_RXQ0_RSS_MODE_SHIFT 26 +#define ALX_RXQ0_RSS_MODE_DIS 0 +#define ALX_RXQ0_RSS_MODE_MQMI 3 +#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F +#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20 +#define ALX_RXQ0_NUM_RFD_PREF_DEF 8 +#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF +#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8 +#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100 +#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128 +#define ALX_RXQ0_IPV6_PARSE_EN BIT(7) +#define ALX_RXQ0_RSS_HSTYP_MASK 0xF +#define ALX_RXQ0_RSS_HSTYP_SHIFT 2 +#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5) +#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4) +#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3) +#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2) +#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV6_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV4_EN) +#define ALX_RXQ0_ASPM_THRESH_MASK 0x3 +#define ALX_RXQ0_ASPM_THRESH_SHIFT 0 +#define ALX_RXQ0_ASPM_THRESH_100M 3 + +#define ALX_RXQ2 0x15A8 +#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF +#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16 +#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF +#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0 +/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) + + * rx-packet(1522) + delay-of-link(64) + * = 3212. 
+ */ +#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212 + +#define ALX_DMA 0x15C0 +#define ALX_DMA_RCHNL_SEL_MASK 0x3 +#define ALX_DMA_RCHNL_SEL_SHIFT 26 +#define ALX_DMA_WDLY_CNT_MASK 0xF +#define ALX_DMA_WDLY_CNT_SHIFT 16 +#define ALX_DMA_WDLY_CNT_DEF 4 +#define ALX_DMA_RDLY_CNT_MASK 0x1F +#define ALX_DMA_RDLY_CNT_SHIFT 11 +#define ALX_DMA_RDLY_CNT_DEF 15 +/* bit10: 0:tpd with pri, 1: data */ +#define ALX_DMA_RREQ_PRI_DATA BIT(10) +#define ALX_DMA_RREQ_BLEN_MASK 0x7 +#define ALX_DMA_RREQ_BLEN_SHIFT 4 +#define ALX_DMA_RORDER_MODE_MASK 0x7 +#define ALX_DMA_RORDER_MODE_SHIFT 0 +#define ALX_DMA_RORDER_MODE_OUT 4 + +#define ALX_WOL0 0x14A0 +#define ALX_WOL0_PME_LINK BIT(5) +#define ALX_WOL0_LINK_EN BIT(4) +#define ALX_WOL0_PME_MAGIC_EN BIT(3) +#define ALX_WOL0_MAGIC_EN BIT(2) + +#define ALX_RFD_PIDX 0x15E0 + +#define ALX_RFD_CIDX 0x15F8 + +/* MIB */ +#define ALX_MIB_BASE 0x1700 +#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0) +#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92) +#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96) +#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192) + +#define ALX_RX_STATS_BIN ALX_MIB_RX_OK +#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR +#define ALX_TX_STATS_BIN ALX_MIB_TX_OK +#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT + +#define ALX_ISR 0x1600 +#define ALX_ISR_DIS BIT(31) +#define ALX_ISR_RX_Q7 BIT(30) +#define ALX_ISR_RX_Q6 BIT(29) +#define ALX_ISR_RX_Q5 BIT(28) +#define ALX_ISR_RX_Q4 BIT(27) +#define ALX_ISR_PCIE_LNKDOWN BIT(26) +#define ALX_ISR_RX_Q3 BIT(19) +#define ALX_ISR_RX_Q2 BIT(18) +#define ALX_ISR_RX_Q1 BIT(17) +#define ALX_ISR_RX_Q0 BIT(16) +#define ALX_ISR_TX_Q0 BIT(15) +#define ALX_ISR_PHY BIT(12) +#define ALX_ISR_DMAW BIT(10) +#define ALX_ISR_DMAR BIT(9) +#define ALX_ISR_TXF_UR BIT(8) +#define ALX_ISR_TX_Q3 BIT(7) +#define ALX_ISR_TX_Q2 BIT(6) +#define ALX_ISR_TX_Q1 BIT(5) +#define ALX_ISR_RFD_UR BIT(4) +#define ALX_ISR_RXF_OV BIT(3) +#define ALX_ISR_MANU BIT(2) +#define ALX_ISR_TIMER BIT(1) +#define ALX_ISR_SMB BIT(0) + +#define ALX_IMR 0x1604 + +/* re-send assert msg if SW no response */ +#define ALX_INT_RETRIG 0x1608 +/* 40ms */ +#define ALX_INT_RETRIG_TO 20000 + +#define ALX_SMB_TIMER 0x15C4 + +#define ALX_TINT_TPD_THRSHLD 0x15C8 + +#define ALX_TINT_TIMER 0x15CC + +#define ALX_CLK_GATE 0x1814 +#define ALX_CLK_GATE_RXMAC BIT(5) +#define ALX_CLK_GATE_TXMAC BIT(4) +#define ALX_CLK_GATE_RXQ BIT(3) +#define ALX_CLK_GATE_TXQ BIT(2) +#define ALX_CLK_GATE_DMAR BIT(1) +#define ALX_CLK_GATE_DMAW BIT(0) +#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \ + ALX_CLK_GATE_TXMAC | \ + ALX_CLK_GATE_RXQ | \ + ALX_CLK_GATE_TXQ | \ + ALX_CLK_GATE_DMAR | \ + ALX_CLK_GATE_DMAW) + +/* interop between drivers */ +#define ALX_DRV 0x1804 +#define ALX_DRV_PHY_AUTO BIT(28) +#define ALX_DRV_PHY_1000 BIT(27) +#define ALX_DRV_PHY_100 BIT(26) +#define ALX_DRV_PHY_10 BIT(25) +#define ALX_DRV_PHY_DUPLEX BIT(24) +/* bit23: adv Pause */ +#define ALX_DRV_PHY_PAUSE BIT(23) +/* bit22: adv Asym Pause */ +#define ALX_DRV_PHY_MASK 0xFF +#define ALX_DRV_PHY_SHIFT 21 +#define ALX_DRV_PHY_UNKNOWN 0 + +/* flag of phy inited */ +#define ALX_PHY_INITED 0x003F + +/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */ +#define ALX_WOL_CTRL2 0x1830 +#define ALX_WOL_CTRL2_DATA_STORE BIT(3) +#define ALX_WOL_CTRL2_PTRN_EVT BIT(2) +#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1) +#define ALX_WOL_CTRL2_PTRN_EN BIT(0) + +#define ALX_WOL_CTRL3 0x1834 +#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF +#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0 + +#define ALX_WOL_CTRL4 0x1838 +#define ALX_WOL_CTRL4_PT15_MATCH BIT(31) +#define 
ALX_WOL_CTRL4_PT14_MATCH BIT(30) +#define ALX_WOL_CTRL4_PT13_MATCH BIT(29) +#define ALX_WOL_CTRL4_PT12_MATCH BIT(28) +#define ALX_WOL_CTRL4_PT11_MATCH BIT(27) +#define ALX_WOL_CTRL4_PT10_MATCH BIT(26) +#define ALX_WOL_CTRL4_PT9_MATCH BIT(25) +#define ALX_WOL_CTRL4_PT8_MATCH BIT(24) +#define ALX_WOL_CTRL4_PT7_MATCH BIT(23) +#define ALX_WOL_CTRL4_PT6_MATCH BIT(22) +#define ALX_WOL_CTRL4_PT5_MATCH BIT(21) +#define ALX_WOL_CTRL4_PT4_MATCH BIT(20) +#define ALX_WOL_CTRL4_PT3_MATCH BIT(19) +#define ALX_WOL_CTRL4_PT2_MATCH BIT(18) +#define ALX_WOL_CTRL4_PT1_MATCH BIT(17) +#define ALX_WOL_CTRL4_PT0_MATCH BIT(16) +#define ALX_WOL_CTRL4_PT15_EN BIT(15) +#define ALX_WOL_CTRL4_PT14_EN BIT(14) +#define ALX_WOL_CTRL4_PT13_EN BIT(13) +#define ALX_WOL_CTRL4_PT12_EN BIT(12) +#define ALX_WOL_CTRL4_PT11_EN BIT(11) +#define ALX_WOL_CTRL4_PT10_EN BIT(10) +#define ALX_WOL_CTRL4_PT9_EN BIT(9) +#define ALX_WOL_CTRL4_PT8_EN BIT(8) +#define ALX_WOL_CTRL4_PT7_EN BIT(7) +#define ALX_WOL_CTRL4_PT6_EN BIT(6) +#define ALX_WOL_CTRL4_PT5_EN BIT(5) +#define ALX_WOL_CTRL4_PT4_EN BIT(4) +#define ALX_WOL_CTRL4_PT3_EN BIT(3) +#define ALX_WOL_CTRL4_PT2_EN BIT(2) +#define ALX_WOL_CTRL4_PT1_EN BIT(1) +#define ALX_WOL_CTRL4_PT0_EN BIT(0) + +#define ALX_WOL_CTRL5 0x183C +#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0 + +#define ALX_WOL_CTRL6 0x1840 +#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0 + +#define ALX_WOL_CTRL7 0x1844 +#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0 + +#define ALX_WOL_CTRL8 0x1848 +#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0 + +#define ALX_ACER_FIXED_PTN0 0x1850 +#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF +#define ALX_ACER_FIXED_PTN0_SHIFT 0 + +#define ALX_ACER_FIXED_PTN1 0x1854 +#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF +#define ALX_ACER_FIXED_PTN1_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM0 0x1858 +#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM0_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM1 0x185C +#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM1_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM2 0x1860 +#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM2_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM3 0x1864 +#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM3_SHIFT 0 + +#define ALX_ACER_MAGIC 0x1868 +#define ALX_ACER_MAGIC_EN BIT(31) +#define ALX_ACER_MAGIC_PME_EN BIT(30) +#define ALX_ACER_MAGIC_MATCH BIT(29) +#define ALX_ACER_MAGIC_FF_CHECK 
BIT(10) +#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F +#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5 +#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F +#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0 + +#define ALX_ACER_TIMER 0x186C +#define ALX_ACER_TIMER_EN BIT(31) +#define ALX_ACER_TIMER_PME_EN BIT(30) +#define ALX_ACER_TIMER_MATCH BIT(29) +#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF +#define ALX_ACER_TIMER_THRES_SHIFT 0 +#define ALX_ACER_TIMER_THRES_DEF 1 + +/* RSS definitions */ +#define ALX_RSS_KEY0 0x14B0 +#define ALX_RSS_KEY1 0x14B4 +#define ALX_RSS_KEY2 0x14B8 +#define ALX_RSS_KEY3 0x14BC +#define ALX_RSS_KEY4 0x14C0 +#define ALX_RSS_KEY5 0x14C4 +#define ALX_RSS_KEY6 0x14C8 +#define ALX_RSS_KEY7 0x14CC +#define ALX_RSS_KEY8 0x14D0 +#define ALX_RSS_KEY9 0x14D4 + +#define ALX_RSS_IDT_TBL0 0x1B00 + +#define ALX_MSI_MAP_TBL1 0x15D0 +#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20 +#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16 +#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12 +#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8 +#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4 +#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0 + +#define ALX_MSI_MAP_TBL2 0x15D8 +#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20 +#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16 +#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12 +#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8 +#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4 +#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0 + +#define ALX_MSI_ID_MAP 0x15D4 + +#define ALX_MSI_RETRANS_TIMER 0x1920 +/* bit16: 1:line,0:standard */ +#define ALX_MSI_MASK_SEL_LINE BIT(16) +#define ALX_MSI_RETRANS_TM_MASK 0xFFFF +#define ALX_MSI_RETRANS_TM_SHIFT 0 + +/* CR DMA ctrl */ + +/* TX QoS */ +#define ALX_WRR 0x1938 +#define ALX_WRR_PRI_MASK 0x3 +#define ALX_WRR_PRI_SHIFT 29 +#define ALX_WRR_PRI_RESTRICT_NONE 3 +#define ALX_WRR_PRI3_MASK 0x1F +#define ALX_WRR_PRI3_SHIFT 24 +#define ALX_WRR_PRI2_MASK 0x1F +#define ALX_WRR_PRI2_SHIFT 16 +#define ALX_WRR_PRI1_MASK 0x1F +#define ALX_WRR_PRI1_SHIFT 8 +#define ALX_WRR_PRI0_MASK 0x1F +#define ALX_WRR_PRI0_SHIFT 0 + +#define ALX_HQTPD 0x193C +#define ALX_HQTPD_BURST_EN BIT(31) +#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8 +#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4 +#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0 + +#define ALX_MISC 0x19C0 +#define ALX_MISC_PSW_OCP_MASK 0x7 +#define ALX_MISC_PSW_OCP_SHIFT 21 +#define ALX_MISC_PSW_OCP_DEF 0x7 +#define ALX_MISC_ISO_EN BIT(12) +#define ALX_MISC_INTNLOSC_OPEN BIT(3) + +#define ALX_MSIC2 0x19C8 +#define ALX_MSIC2_CALB_START BIT(0) + +#define ALX_MISC3 0x19CC +/* bit1: 1:Software control 25M */ +#define ALX_MISC3_25M_BY_SW BIT(1) +/* bit0: 25M switch to intnl OSC */ +#define ALX_MISC3_25M_NOTO_INTNL BIT(0) + +/* MSIX tbl in memory space */ +#define ALX_MSIX_ENTRY_BASE 0x2000 + +/********************* PHY regs definition ***************************/ + +/* PHY Specific Status Register */ +#define ALX_MII_GIGA_PSSR 0x11 +#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 +#define ALX_GIGA_PSSR_DPLX 0x2000 +#define ALX_GIGA_PSSR_SPEED 0xC000 +#define ALX_GIGA_PSSR_10MBS 0x0000 +#define ALX_GIGA_PSSR_100MBS 0x4000 +#define ALX_GIGA_PSSR_1000MBS 0x8000 + +/* PHY Interrupt Enable Register */ +#define ALX_MII_IER 0x12 +#define ALX_IER_LINK_UP 0x0400 +#define ALX_IER_LINK_DOWN 0x0800 + +/* PHY Interrupt Status Register */ +#define ALX_MII_ISR 0x13 + +#define ALX_MII_DBG_ADDR 0x1D +#define ALX_MII_DBG_DATA 0x1E + +/***************************** debug port *************************************/ + +#define ALX_MIIDBG_ANACTRL 0x00 +#define ALX_ANACTRL_DEF 0x02EF + 
+#define ALX_MIIDBG_SYSMODCTRL 0x04 +/* en half bias */ +#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B + +#define ALX_MIIDBG_SRDSYSMOD 0x05 +#define ALX_SRDSYSMOD_DEEMP_EN 0x0040 +#define ALX_SRDSYSMOD_DEF 0x2C46 + +#define ALX_MIIDBG_HIBNEG 0x0B +#define ALX_HIBNEG_PSHIB_EN 0x8000 +#define ALX_HIBNEG_HIB_PSE 0x1000 +#define ALX_HIBNEG_DEF 0xBC40 +#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \ + ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE)) + +#define ALX_MIIDBG_TST10BTCFG 0x12 +#define ALX_TST10BTCFG_DEF 0x4C04 + +#define ALX_MIIDBG_AZ_ANADECT 0x15 +#define ALX_AZ_ANADECT_DEF 0x3220 +#define ALX_AZ_ANADECT_LONG 0x3210 + +#define ALX_MIIDBG_MSE16DB 0x18 +#define ALX_MSE16DB_UP 0x05EA +#define ALX_MSE16DB_DOWN 0x02EA + +#define ALX_MIIDBG_MSE20DB 0x1C +#define ALX_MSE20DB_TH_MASK 0x7F +#define ALX_MSE20DB_TH_SHIFT 2 +#define ALX_MSE20DB_TH_DEF 0x2E +#define ALX_MSE20DB_TH_HI 0x54 + +#define ALX_MIIDBG_AGC 0x23 +#define ALX_AGC_2_VGA_MASK 0x3FU +#define ALX_AGC_2_VGA_SHIFT 8 +#define ALX_AGC_LONG1G_LIMT 40 +#define ALX_AGC_LONG100M_LIMT 44 + +#define ALX_MIIDBG_LEGCYPS 0x29 +#define ALX_LEGCYPS_EN 0x8000 +#define ALX_LEGCYPS_DEF 0x129D + +#define ALX_MIIDBG_TST100BTCFG 0x36 +#define ALX_TST100BTCFG_DEF 0xE12C + +#define ALX_MIIDBG_GREENCFG 0x3B +#define ALX_GREENCFG_DEF 0x7078 + +#define ALX_MIIDBG_GREENCFG2 0x3D +#define ALX_GREENCFG2_BP_GREEN 0x8000 +#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080 + +/******* dev 3 *********/ +#define ALX_MIIEXT_PCS 3 + +#define ALX_MIIEXT_CLDCTRL3 0x8003 +#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 + +#define ALX_MIIEXT_CLDCTRL5 0x8005 +#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000 + +#define ALX_MIIEXT_CLDCTRL6 0x8006 +#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF +#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0 +#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116 +#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152 + +#define ALX_MIIEXT_VDRVBIAS 0x8062 +#define ALX_VDRVBIAS_DEF 0x3 + +/********* dev 7 **********/ +#define ALX_MIIEXT_ANEG 7 + +#define ALX_MIIEXT_LOCAL_EEEADV 0x3C +#define ALX_LOCAL_EEEADV_1000BT 0x0004 +#define ALX_LOCAL_EEEADV_100BT 0x0002 + +#define ALX_MIIEXT_AFE 0x801A +#define ALX_AFE_10BT_100M_TH 0x0040 + +#define ALX_MIIEXT_S3DIG10 0x8023 +/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */ +#define ALX_MIIEXT_S3DIG10_SL 0x0001 +#define ALX_MIIEXT_S3DIG10_DEF 0 + +#define ALX_MIIEXT_NLP78 0x8027 +#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05 + +#endif diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index c777b9013164..a13463e8a2c3 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -744,6 +744,9 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) status = tg3_ape_read32(tp, gnt + off); if (status == bit) break; + if (pci_channel_offline(tp->pdev)) + break; + udelay(10); } @@ -1635,6 +1638,9 @@ static void tg3_wait_for_event_ack(struct tg3 *tp) for (i = 0; i < delay_cnt; i++) { if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) break; + if (pci_channel_offline(tp->pdev)) + break; + udelay(8); } } @@ -1813,6 +1819,9 @@ static int tg3_poll_fw(struct tg3 *tp) for (i = 0; i < 200; i++) { if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) return 0; + if (pci_channel_offline(tp->pdev)) + return -ENODEV; + udelay(100); } return -ENODEV; @@ -1823,6 +1832,15 @@ static int tg3_poll_fw(struct tg3 *tp) tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) break; + if (pci_channel_offline(tp->pdev)) { + if (!tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, 
NO_FWARE_REPORTED); + netdev_info(tp->dev, "No firmware running\n"); + } + + break; + } + udelay(10); } @@ -3520,6 +3538,8 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) break; + if (pci_channel_offline(tp->pdev)) + return -EBUSY; } return (i == iters) ? -EBUSY : 0; @@ -8589,6 +8609,14 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo tw32_f(ofs, val); for (i = 0; i < MAX_WAIT_CNT; i++) { + if (pci_channel_offline(tp->pdev)) { + dev_err(&tp->pdev->dev, + "tg3_stop_block device offline, " + "ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + udelay(100); val = tr32(ofs); if ((val & enable_bit) == 0) @@ -8612,6 +8640,13 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent) tg3_disable_ints(tp); + if (pci_channel_offline(tp->pdev)) { + tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + err = -ENODEV; + goto err_no_dev; + } + tp->rx_mode &= ~RX_MODE_ENABLE; tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); @@ -8660,6 +8695,7 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent) err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); +err_no_dev: for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->hw_status) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a667015be22a..d48099f03b7f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -516,6 +516,7 @@ fec_restart(struct net_device *ndev, int duplex) /* Set MII speed */ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); +#if !defined(CONFIG_M5272) /* set RX checksum */ val = readl(fep->hwp + FEC_RACC); if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) @@ -523,6 +524,7 @@ fec_restart(struct net_device *ndev, int duplex) else val &= ~FEC_RACC_OPTIONS; writel(val, fep->hwp + FEC_RACC); +#endif /* * The phy interface and speed need to get configured @@ -575,6 +577,7 @@ fec_restart(struct net_device *ndev, int duplex) #endif } +#if !defined(CONFIG_M5272) /* enable pause frame*/ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && @@ -592,6 +595,7 @@ fec_restart(struct net_device *ndev, int duplex) } else { rcntl &= ~FEC_ENET_FCE; } +#endif /* !defined(CONFIG_M5272) */ writel(rcntl, fep->hwp + FEC_R_CNTRL); @@ -1205,7 +1209,9 @@ static int fec_enet_mii_probe(struct net_device *ndev) /* mask with MAC supported features */ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { phy_dev->supported &= PHY_GBIT_FEATURES; +#if !defined(CONFIG_M5272) phy_dev->supported |= SUPPORTED_Pause; +#endif } else phy_dev->supported &= PHY_BASIC_FEATURES; @@ -1390,6 +1396,8 @@ static int fec_enet_get_ts_info(struct net_device *ndev, } } +#if !defined(CONFIG_M5272) + static void fec_enet_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { @@ -1436,9 +1444,13 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, return 0; } +#endif /* !defined(CONFIG_M5272) */ + static const struct ethtool_ops fec_enet_ethtool_ops = { +#if !defined(CONFIG_M5272) .get_pauseparam = fec_enet_get_pauseparam, .set_pauseparam = fec_enet_set_pauseparam, +#endif .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, @@ -1874,10 +1886,12 @@ fec_probe(struct 
platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); +#if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (pdev->id_entry && (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; +#endif fep->hwp = devm_request_and_ioremap(&pdev->dev, r); fep->pdev = pdev; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2ad1494efbb3..d1cbfb12c1ca 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) memset(rxq->rx_desc_area, 0, size); rxq->rx_desc_area_size = size; - rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb), + rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), GFP_KERNEL); if (rxq->rx_skb == NULL) goto out_free; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 339bb323cb0c..1c8af8ba08d9 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev) int rx_desc_num = pep->rx_ring_size; /* Allocate RX skb rings */ - pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, + pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, GFP_KERNEL); if (!pep->rx_skb) return -ENOMEM; @@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev) int size = 0, i = 0; int tx_desc_num = pep->tx_ring_size; - pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, + pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, GFP_KERNEL); if (!pep->tx_skb) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2f4a26039e80..8a434997a0df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -632,6 +632,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.cqe_size = 32; } + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); + slave_adjust_steering_mode(dev, &dev_cap, &hca_param); return 0; diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 921729f9c85c..91a8a5d28037 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -46,17 +46,25 @@ union mgmt_port_ring_entry { u64 d64; struct { - u64 reserved_62_63:2; +#define RING_ENTRY_CODE_DONE 0xf +#define RING_ENTRY_CODE_MORE 0x10 +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved_62_63:2; /* Length of the buffer/packet in bytes */ - u64 len:14; + u64 len:14; /* For TX, signals that the packet should be timestamped */ - u64 tstamp:1; + u64 tstamp:1; /* The RX error code */ - u64 code:7; -#define RING_ENTRY_CODE_DONE 0xf -#define RING_ENTRY_CODE_MORE 0x10 + u64 code:7; /* Physical address of the buffer */ - u64 addr:40; + u64 addr:40; +#else + u64 addr:40; + u64 code:7; + u64 tstamp:1; + u64 len:14; + u64 reserved_62_63:2; +#endif } s; }; @@ -1141,10 +1149,13 @@ static int octeon_mgmt_open(struct net_device *netdev) /* For compensation state to lock. */ ndelay(1040 * NS_PER_PHY_CLK); - /* Some Ethernet switches cannot handle standard - * Interframe Gap, increase to 16 bytes. + /* Default Interframe Gaps are too small. Recommended + * workaround is. 
+ * + * AGL_GMX_TX_IFG[IFG1]=14 + * AGL_GMX_TX_IFG[IFG2]=10 */ - cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88); + cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); } octeon_mgmt_rx_fill_ring(netdev); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 43562c256379..6acf82b9f018 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) qlcnic_83xx_config_intrpt(adapter, 0); } /* Allow dma queues to drain after context reset */ - msleep(20); + mdelay(20); } } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5e3982fc5398..e29fe8dbd226 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -380,8 +380,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -427,8 +428,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -478,8 +480,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rmcr_value = 0x00000001, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -592,9 +595,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, .fdr_value = 0x0000072f, @@ -674,9 +677,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, @@ -811,9 +814,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - 
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, @@ -1549,11 +1552,12 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) ignore_link: if (intr_status & EESR_TWB) { - /* Write buck end. unused write back interrupt */ - if (intr_status & EESR_TABT) /* Transmit Abort int */ + /* Unused write back interrupt */ + if (intr_status & EESR_TABT) { /* Transmit Abort int */ ndev->stats.tx_aborted_errors++; if (netif_msg_tx_err(mdp)) dev_err(&ndev->dev, "Transmit Abort\n"); + } } if (intr_status & EESR_RABT) { diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 1ddc9f235bcb..62689a5823be 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -253,7 +253,7 @@ enum EESR_BIT { #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ EESR_RTO) -#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ +#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE | EESR_ECI) #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 39e4cb39de29..4a14a940c65e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, "%d\n", efx->phy_type); } -static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); +static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); static int efx_register_netdev(struct efx_nic *efx) { diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7788fbe44f0a..95176979b2d2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -297,8 +297,8 @@ struct dma_features { #define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ /* Default LPI timers */ -#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 -#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 +#define STMMAC_DEFAULT_LIT_LS 0x3E8 +#define STMMAC_DEFAULT_TWT_LS 0x0 #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ee919ca8b8a0..e9eab29db7be 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; module_param(eee_timer, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); -#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) +#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) /* By default the driver will use the ring mode to manage tx and rx descriptors * but passing this value so user can force to use the chain instead of the ring @@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) struct stmmac_priv *priv = (struct stmmac_priv *)arg; stmmac_enable_eee_mode(priv); 
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } /** @@ -304,22 +304,34 @@ bool stmmac_eee_init(struct stmmac_priv *priv) { bool ret = false; + /* Using PCS we cannot dial with the phy registers at this stage + * so we do not support extra feature like EEE. + */ + if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) || + (priv->pcs == STMMAC_PCS_RTBI)) + goto out; + /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { /* Check if the PHY supports EEE */ if (phy_init_eee(priv->phydev, 1)) goto out; - priv->eee_active = 1; - init_timer(&priv->eee_ctrl_timer); - priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; - priv->eee_ctrl_timer.data = (unsigned long)priv; - priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer); - add_timer(&priv->eee_ctrl_timer); - - priv->hw->mac->set_eee_timer(priv->ioaddr, - STMMAC_DEFAULT_LIT_LS_TIMER, - priv->tx_lpi_timer); + if (!priv->eee_active) { + priv->eee_active = 1; + init_timer(&priv->eee_ctrl_timer); + priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; + priv->eee_ctrl_timer.data = (unsigned long)priv; + priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); + add_timer(&priv->eee_ctrl_timer); + + priv->hw->mac->set_eee_timer(priv->ioaddr, + STMMAC_DEFAULT_LIT_LS, + priv->tx_lpi_timer); + } else + /* Set HW EEE according to the speed */ + priv->hw->mac->set_eee_pls(priv->ioaddr, + priv->phydev->link); pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); @@ -329,20 +341,6 @@ out: return ret; } -/** - * stmmac_eee_adjust: adjust HW EEE according to the speed - * @priv: driver private structure - * Description: - * When the EEE has been already initialised we have to - * modify the PLS bit in the LPI ctrl & status reg according - * to the PHY link status. For this reason. - */ -static void stmmac_eee_adjust(struct stmmac_priv *priv) -{ - if (priv->eee_enabled) - priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); -} - /* stmmac_get_tx_hwtstamp: get HW TX timestamps * @priv: driver private structure * @entry : descriptor index to be used. @@ -769,7 +767,10 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - stmmac_eee_adjust(priv); + /* At this stage, it could be needed to setup the EEE or adjust some + * MAC related HW registers. + */ + priv->eee_enabled = stmmac_eee_init(priv); spin_unlock_irqrestore(&priv->lock, flags); @@ -1277,7 +1278,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { stmmac_enable_eee_mode(priv); - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } spin_unlock(&priv->tx_lock); } @@ -1671,14 +1672,9 @@ static int stmmac_open(struct net_device *dev) if (priv->phydev) phy_start(priv->phydev); - priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; + priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; - /* Using PCS we cannot dial with the phy registers at this stage - * so we do not support extra feature like EEE. 
- */ - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) - priv->eee_enabled = stmmac_eee_init(priv); + priv->eee_enabled = stmmac_eee_init(priv); stmmac_init_tx_coalesce(priv); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 21a5b291b4b3..d1a769f35f9d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev) priv->rx_packet_max = max(rx_packet_max, 128); priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); priv->irq_enabled = true; - if (!ndev) { + if (!priv->cpts) { pr_err("error allocating cpts\n"); goto clean_ndev_ret; } @@ -1973,9 +1973,12 @@ static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) cpsw_ndo_stop(ndev); + soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); + soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); pm_runtime_put_sync(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 49dfd592ac1e..053c84fd0853 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -705,6 +705,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, } buffer = dma_map_single(ctlr->dev, data, len, chan->dir); + ret = dma_mapping_error(ctlr->dev, buffer); + if (ret) { + cpdma_desc_free(ctlr->pool, desc, 1); + ret = -EINVAL; + goto unlock_ret; + } + mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; cpdma_desc_to_port(chan, mode, directed); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ab2307b5d9a7..4dccead586be 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb->protocol = eth_type_trans(skb, net); skb->ip_summed = CHECKSUM_NONE; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci); + if (packet->vlan_tci & VLAN_TAG_PRESENT) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + packet->vlan_tci); net->stats.rx_packets++; net->stats.rx_bytes += packet->total_data_buflen; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 59e9605de316..b6dd6a75919a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -524,8 +524,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - for (i = 0; i < num_pages; i++) - put_page(page[i]); + int j; + + for (j = 0; j < num_pages; j++) + put_page(page[i + j]); return -EFAULT; } truesize = size * PAGE_SIZE; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bfa9bb48e42d..9c61f8734a40 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1010,8 +1010,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - for (i = 0; i < num_pages; i++) - put_page(page[i]); + int j; + + for (j = 0; j < num_pages; j++) + put_page(page[i + j]); return -EFAULT; } truesize = size * PAGE_SIZE; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d095d0d3056b..56459215a22b 100644 --- 
a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -590,7 +590,13 @@ static const struct usb_device_id products[] = { {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3b1d2ee7156b..57325f356d4f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -565,18 +565,22 @@ skip: /* Watch incoming packets to learn mapping between Ethernet address * and Tunnel endpoint. + * Return true if packet is bogus and should be droppped. */ -static void vxlan_snoop(struct net_device *dev, +static bool vxlan_snoop(struct net_device *dev, __be32 src_ip, const u8 *src_mac) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; - int err; f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { if (likely(f->remote.remote_ip == src_ip)) - return; + return false; + + /* Don't migrate static entries, drop packets */ + if (f->state & NUD_NOARP) + return true; if (net_ratelimit()) netdev_info(dev, @@ -588,14 +592,19 @@ static void vxlan_snoop(struct net_device *dev, } else { /* learned new entry */ spin_lock(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, src_mac, src_ip, - NUD_REACHABLE, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, - vxlan->default_dst.remote_vni, - 0, NTF_SELF); + + /* close off race between vxlan_flush and incoming packets */ + if (netif_running(dev)) + vxlan_fdb_create(vxlan, src_mac, src_ip, + NUD_REACHABLE, + NLM_F_EXCL|NLM_F_CREATE, + vxlan->dst_port, + vxlan->default_dst.remote_vni, + 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); } + + return false; } @@ -727,8 +736,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) vxlan->dev->dev_addr) == 0) goto drop; - if (vxlan->flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); + if ((vxlan->flags & VXLAN_F_LEARN) && + vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source)) + goto drop; __skb_tunnel_rx(skb, vxlan->dev); skb_reset_network_header(skb); @@ -1151,9 +1161,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) struct sk_buff *skb1; skb1 = skb_clone(skb, GFP_ATOMIC); - rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); - if (rc == NETDEV_TX_OK) - rc = rc1; + if (skb1) { + rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); + if (rc == NETDEV_TX_OK) + rc = rc1; + } } rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 147614ed86aa..6a8a382c5f4c 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci) struct frad_local *flp; 
struct net_device *master, *slave; int err; + bool found = false; + + rtnl_lock(); /* validate slave device */ master = __dev_get_by_name(&init_net, dlci->devname); - if (!master) - return -ENODEV; + if (!master) { + err = -ENODEV; + goto out; + } + + list_for_each_entry(dlp, &dlci_devs, list) { + if (dlp->master == master) { + found = true; + break; + } + } + if (!found) { + err = -ENODEV; + goto out; + } if (netif_running(master)) { - return -EBUSY; + err = -EBUSY; + goto out; } dlp = netdev_priv(master); slave = dlp->slave; flp = netdev_priv(slave); - rtnl_lock(); err = (*flp->deassoc)(slave, master); if (!err) { list_del(&dlp->list); @@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci) dev_put(slave); } +out: rtnl_unlock(); - return err; } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 0743a47cef8f..62f1b7636c92 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&priv->htc_pm_lock); priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); - if (priv->ps_idle) + if (!priv->ps_idle) chip_reset = true; mutex_unlock(&priv->htc_pm_lock); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 1c9b1bac8b0d..83ab6be3fe6d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1570,6 +1570,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) return; + rcu_read_lock(); + ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); @@ -1608,8 +1610,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) if (ac == last_ac || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) - return; + break; } + + rcu_read_unlock(); } /***********/ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index b98f2235978e..2c593570497c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -930,6 +930,10 @@ fail: brcmf_fws_del_interface(ifp); brcmf_fws_deinit(drvr); } + if (drvr->iflist[0]) { + free_netdev(ifp->ndev); + drvr->iflist[0] = NULL; + } if (p2p_ifp) { free_netdev(p2p_ifp->ndev); drvr->iflist[1] = NULL; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 28e7aeedd184..9fd6f2fef11b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail) */ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) { - /* disallow PS when one of the following global conditions meets */ - if (!wlc->pub->associated) - return false; - - /* disallow PS when one of these meets when not scanning */ - if (wlc->filter_flags & FIF_PROMISC_IN_BSS) - return false; - - if (wlc->bsscfg->type == BRCMS_TYPE_AP) - return false; - - if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC) - return false; - - return true; + /* not supporting PS so always return false for now */ + return false; } static void brcms_c_statsupd(struct brcms_c_info *wlc) diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c index 
c9f197d9ca1e..fe31590a51b2 100644 --- a/drivers/net/wireless/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/iwlegacy/3945-rs.c @@ -816,6 +816,7 @@ out: rs_sta->last_txrate_idx = idx; info->control.rates[0].idx = rs_sta->last_txrate_idx; } + info->control.rates[0].count = 1; D_RATE("leave: %d\n", idx); } diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index 1fc0b227e120..ed3c42a63a43 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - + info->control.rates[0].count = 1; } static void * diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 907bd6e50aad..10fbb176cc8e 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - + info->control.rates[0].count = 1; } static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 707446fa00bd..cd1ad0019185 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv) struct iwl_chain_noise_data *data = &priv->chain_noise_data; int ret; - if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) + if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) return; if ((data->state == IWL_CHAIN_NOISE_ALIVE) && diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 39aad9893e0b..40fed1f511e2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) */ if (load_module) { err = request_module("%s", op->name); +#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR if (err) IWL_ERR(drv, "failed to load module %s (error %d), is dynamic loading enabled?\n", op->name, err); +#endif } return; diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 55334d542e26..b99fe3163866 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -2546,6 +2546,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; + info->control.rates[0].count = 1; } static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index f212f16502ff..48c1891e3df6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } else if (ieee80211_is_back_req(fc)) { - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + tx_cmd->tx_flags |= + cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } /* HT rate doesn't make sense for a non data frame */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 
b52d70c75e1a..72f32e5caa4d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, * TODO: we do not use +6 dBm option to do not increase power beyond * regulatory limit, however this could be utilized for devices with * CAPABILITY_POWER_LIMIT. + * + * TODO: add different temperature compensation code for RT3290 & RT5390 + * to allow to use BBP_R1 for those chips. */ - rt2800_bbp_read(rt2x00dev, 1, &r1); - if (delta <= -12) { - power_ctrl = 2; - delta += 12; - } else if (delta <= -6) { - power_ctrl = 1; - delta += 6; - } else { - power_ctrl = 0; + if (!rt2x00_rt(rt2x00dev, RT3290) && + !rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_bbp_read(rt2x00dev, 1, &r1); + if (delta <= -12) { + power_ctrl = 2; + delta += 12; + } else if (delta <= -6) { + power_ctrl = 1; + delta += 6; + } else { + power_ctrl = 0; + } + rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); + rt2800_bbp_write(rt2x00dev, 1, r1); } - rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); - rt2800_bbp_write(rt2x00dev, 1, r1); + offset = TX_PWR_CFG_0; for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 9544cdc0d1af..e79e006eb9ab 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -811,6 +811,70 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) return pcidev->irq; } +static struct iosapic_info *first_isi = NULL; + +#ifdef CONFIG_64BIT +int iosapic_serial_irq(int num) +{ + struct iosapic_info *isi = first_isi; + struct irt_entry *irte = NULL; /* only used if PAT PDC */ + struct vector_info *vi; + int isi_line; /* line used by device */ + + /* lookup IRT entry for isi/slot/pin set */ + irte = &irt_cell[num]; + + DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", + irte, + irte->entry_type, + irte->entry_length, + irte->polarity_trigger, + irte->src_bus_irq_devno, + irte->src_bus_id, + irte->src_seg_id, + irte->dest_iosapic_intin, + (u32) irte->dest_iosapic_addr); + isi_line = irte->dest_iosapic_intin; + + /* get vector info for this input line */ + vi = isi->isi_vector + isi_line; + DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); + + /* If this IRQ line has already been setup, skip it */ + if (vi->irte) + goto out; + + vi->irte = irte; + + /* + * Allocate processor IRQ + * + * XXX/FIXME The txn_alloc_irq() code and related code should be + * moved to enable_irq(). That way we only allocate processor IRQ + * bits for devices that actually have drivers claiming them. + * Right now we assign an IRQ to every PCI device present, + * regardless of whether it's used or not. 
+ */ + vi->txn_irq = txn_alloc_irq(8); + + if (vi->txn_irq < 0) + panic("I/O sapic: couldn't get TXN IRQ\n"); + + /* enable_irq() will use txn_* to program IRdT */ + vi->txn_addr = txn_alloc_addr(vi->txn_irq); + vi->txn_data = txn_alloc_data(vi->txn_irq); + + vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; + vi->eoi_data = cpu_to_le32(vi->txn_data); + + cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); + + out: + + return vi->txn_irq; +} +#endif + /* ** squirrel away the I/O Sapic Version @@ -877,6 +941,8 @@ void *iosapic_register(unsigned long hpa) vip->irqline = (unsigned char) cnt; vip->iosapic = isi; } + if (!first_isi) + first_isi = isi; return isi; } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 716aa93fff76..59df8575a48c 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -61,6 +61,7 @@ static DEFINE_MUTEX(bridge_mutex); static void handle_hotplug_event_bridge (acpi_handle, u32, void *); static void acpiphp_sanitize_bus(struct pci_bus *bus); static void acpiphp_set_hpp_values(struct pci_bus *bus); +static void hotplug_event_func(acpi_handle handle, u32 type, void *context); static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); static void free_bridge(struct kref *kref); @@ -147,7 +148,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, static const struct acpi_dock_ops acpiphp_dock_ops = { - .handler = handle_hotplug_event_func, + .handler = hotplug_event_func, }; /* Check whether the PCI device is managed by native PCIe hotplug driver */ @@ -179,6 +180,20 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev) return true; } +static void acpiphp_dock_init(void *data) +{ + struct acpiphp_func *func = data; + + get_bridge(func->slot->bridge); +} + +static void acpiphp_dock_release(void *data) +{ + struct acpiphp_func *func = data; + + put_bridge(func->slot->bridge); +} + /* callback routine to register each ACPI PCI slot object */ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) @@ -298,7 +313,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) */ newfunc->flags &= ~FUNC_HAS_EJ0; if (register_hotplug_dock_device(handle, - &acpiphp_dock_ops, newfunc)) + &acpiphp_dock_ops, newfunc, + acpiphp_dock_init, acpiphp_dock_release)) dbg("failed to register dock device\n"); /* we need to be notified when dock events happen @@ -670,6 +686,7 @@ static int __ref enable_device(struct acpiphp_slot *slot) struct pci_bus *bus = slot->bridge->pci_bus; struct acpiphp_func *func; int num, max, pass; + LIST_HEAD(add_list); if (slot->flags & SLOT_ENABLED) goto err_exit; @@ -694,13 +711,15 @@ static int __ref enable_device(struct acpiphp_slot *slot) max = pci_scan_bridge(bus, dev, max, pass); if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); - pci_bus_size_bridges(dev->subordinate); + pcibios_resource_survey_bus(dev->subordinate); + __pci_bus_size_bridges(dev->subordinate, + &add_list); } } } } - pci_bus_assign_resources(bus); + __pci_bus_assign_resources(bus, &add_list, NULL); acpiphp_sanitize_bus(bus); acpiphp_set_hpp_values(bus); acpiphp_set_acpi_region(slot); @@ -1065,22 +1084,12 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge); } -static void _handle_hotplug_event_func(struct work_struct *work) +static void hotplug_event_func(acpi_handle handle, u32 type, void *context) { - 
struct acpiphp_func *func; + struct acpiphp_func *func = context; char objname[64]; struct acpi_buffer buffer = { .length = sizeof(objname), .pointer = objname }; - struct acpi_hp_work *hp_work; - acpi_handle handle; - u32 type; - - hp_work = container_of(work, struct acpi_hp_work, work); - handle = hp_work->handle; - type = hp_work->type; - func = (struct acpiphp_func *)hp_work->context; - - acpi_scan_lock_acquire(); acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); @@ -1113,6 +1122,18 @@ static void _handle_hotplug_event_func(struct work_struct *work) warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); break; } +} + +static void _handle_hotplug_event_func(struct work_struct *work) +{ + struct acpi_hp_work *hp_work; + struct acpiphp_func *func; + + hp_work = container_of(work, struct acpi_hp_work, work); + func = hp_work->context; + acpi_scan_lock_acquire(); + + hotplug_event_func(hp_work->handle, hp_work->type, func); acpi_scan_lock_release(); kfree(hp_work); /* allocated in handle_hotplug_event_func */ diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 68678ed76b0d..d1182c4a754e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -202,6 +202,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int reg); int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); void pci_configure_ari(struct pci_dev *dev); +void __ref __pci_bus_size_bridges(struct pci_bus *bus, + struct list_head *realloc_head); +void __ref __pci_bus_assign_resources(const struct pci_bus *bus, + struct list_head *realloc_head, + struct list_head *fail_head); /** * pci_ari_enabled - query ARI forwarding status diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 16abaaa1f83c..d254e2379533 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1044,7 +1044,7 @@ handle_done: ; } -static void __ref __pci_bus_size_bridges(struct pci_bus *bus, +void __ref __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *dev; @@ -1115,9 +1115,9 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_size_bridges); -static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, - struct list_head *realloc_head, - struct list_head *fail_head) +void __ref __pci_bus_assign_resources(const struct pci_bus *bus, + struct list_head *realloc_head, + struct list_head *fail_head) { struct pci_bus *b; struct pci_dev *dev; diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index d8fa37d5c734..2c9155b66f09 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c @@ -439,7 +439,7 @@ static int tps6586x_regulator_remove(struct platform_device *pdev) static struct platform_driver tps6586x_regulator_driver = { .driver = { - .name = "tps6586x-pmic", + .name = "tps6586x-regulator", .owner = THIS_MODULE, }, .probe = tps6586x_regulator_probe, diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 292b24f9bf93..32ae6c67ea3a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1656,9 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { - skb->vlan_tci = VLAN_TAG_PRESENT | - vlan_dev_vlan_id(fcoe->netdev); + /* must set skb->dev before calling vlan_put_tag */ skb->dev = fcoe->realdev; + skb = 
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_dev_vlan_id(fcoe->netdev)); + if (!skb) + return -ENOMEM; } else skb->dev = fcoe->netdev; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index cd743c545ce9..795843dde8ec 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -1548,9 +1548,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; struct fcoe_fcf *best = fip->sel_fcf; - struct fcoe_fcf *first; - - first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list); list_for_each_entry(fcf, &fip->fcfs, list) { LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " @@ -1568,17 +1565,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) "" : "un"); continue; } - if (fcf->fabric_name != first->fabric_name || - fcf->vfid != first->vfid || - fcf->fc_map != first->fc_map) { + if (!best || fcf->pri < best->pri || best->flogi_sent) + best = fcf; + if (fcf->fabric_name != best->fabric_name || + fcf->vfid != best->vfid || + fcf->fc_map != best->fc_map) { LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " "or FC-MAP\n"); return NULL; } - if (fcf->flogi_sent) - continue; - if (!best || fcf->pri < best->pri || best->flogi_sent) - best = fcf; } fip->sel_fcf = best; if (best) { diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 82a3c1ec8706..6c4cedb44c07 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -8980,19 +8980,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) if (!ioa_cfg->res_entries) goto out; - if (ioa_cfg->sis64) { - ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) * - BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); - ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) * - BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); - ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) * - BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); - - if (!ioa_cfg->target_ids || !ioa_cfg->array_ids - || !ioa_cfg->vset_ids) - goto out_free_res_entries; - } - for (i = 0; i < ioa_cfg->max_devs_supported; i++) { list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; @@ -9089,9 +9076,6 @@ out_free_vpd_cbs: ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); out_free_res_entries: kfree(ioa_cfg->res_entries); - kfree(ioa_cfg->target_ids); - kfree(ioa_cfg->array_ids); - kfree(ioa_cfg->vset_ids); goto out; } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index a1fb840596ef..07a85ce41782 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1440,9 +1440,9 @@ struct ipr_ioa_cfg { /* * Bitmaps for SIS64 generated target values */ - unsigned long *target_ids; - unsigned long *array_ids; - unsigned long *vset_ids; + unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; u16 type; /* CCIN of the card */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index c772d8d27159..8b928c67e4b9 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -463,13 +463,7 @@ static void fc_exch_delete(struct fc_exch *ep) fc_exch_release(ep); /* drop hold for exch in mp */ } -/** - * fc_seq_send() - Send a frame using existing sequence/exchange pair - * @lport: The local port that the exchange will be sent on - * @sp: The sequence to be sent - * @fp: The frame to be sent on the exchange - */ -static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, 
+static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) { struct fc_exch *ep; @@ -479,7 +473,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, u8 fh_type = fh->fh_type; ep = fc_seq_exch(sp); - WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); + WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); f_ctl = ntoh24(fh->fh_f_ctl); fc_exch_setup_hdr(ep, fp, f_ctl); @@ -502,17 +496,34 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, error = lport->tt.frame_send(lport, fp); if (fh_type == FC_TYPE_BLS) - return error; + goto out; /* * Update the exchange and sequence flags, * assuming all frames for the sequence have been sent. * We can only be called to send once for each sequence. */ - spin_lock_bh(&ep->ex_lock); ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ if (f_ctl & FC_FC_SEQ_INIT) ep->esb_stat &= ~ESB_ST_SEQ_INIT; +out: + return error; +} + +/** + * fc_seq_send() - Send a frame using existing sequence/exchange pair + * @lport: The local port that the exchange will be sent on + * @sp: The sequence to be sent + * @fp: The frame to be sent on the exchange + */ +static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, + struct fc_frame *fp) +{ + struct fc_exch *ep; + int error; + ep = fc_seq_exch(sp); + spin_lock_bh(&ep->ex_lock); + error = fc_seq_send_locked(lport, sp, fp); spin_unlock_bh(&ep->ex_lock); return error; } @@ -629,7 +640,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep, if (fp) { fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - error = fc_seq_send(ep->lp, sp, fp); + error = fc_seq_send_locked(ep->lp, sp, fp); } else error = -ENOBUFS; return error; @@ -1132,7 +1143,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; f_ctl |= ep->f_ctl; fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); - fc_seq_send(ep->lp, sp, fp); + fc_seq_send_locked(ep->lp, sp, fp); } /** @@ -1307,8 +1318,8 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) ap->ba_low_seq_cnt = htons(sp->cnt); } sp = fc_seq_start_next_locked(sp); - spin_unlock_bh(&ep->ex_lock); fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); + spin_unlock_bh(&ep->ex_lock); fc_frame_free(rx_fp); return; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index d518d17e940f..6bbb9447b75d 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1962,7 +1962,7 @@ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len, rdata->flags |= FC_RP_FLAGS_RETRY; rdata->supported_classes = FC_COS_CLASS3; - if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR)) + if (!(lport->service_params & FCP_SPPF_INIT_FCN)) return 0; spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 98ab921070d2..0a5c8951cebb 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -278,3 +278,14 @@ qla2x00_do_host_ramp_up(scsi_qla_host_t *vha) set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags); } + +static inline void +qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status) +{ + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + 
complete(&ha->mbx_intr_comp); + } +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 259d9205d876..d2a4c75e5b8f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -104,14 +104,9 @@ qla2100_intr_handler(int irq, void *dev_id) RD_REG_WORD(®->hccr); } } + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - return (IRQ_HANDLED); } @@ -221,14 +216,9 @@ qla2300_intr_handler(int irq, void *dev_id) WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD_RELAXED(®->hccr); } + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - return (IRQ_HANDLED); } @@ -2613,14 +2603,9 @@ qla24xx_intr_handler(int irq, void *dev_id) if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) ndelay(3500); } + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - return IRQ_HANDLED; } @@ -2763,13 +2748,9 @@ qla24xx_msix_default(int irq, void *dev_id) } WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); } while (0); + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } return IRQ_HANDLED; } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 9e5d89db7272..3587ec267fa6 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -179,8 +179,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - } else { ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 937fed8cb038..a6df55838365 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -148,9 +148,6 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); - - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - } else { ql_dbg(ql_dbg_mbx, vha, 0x112c, "Cmd=%x Polling Mode.\n", command); @@ -2934,13 +2931,10 @@ qlafx00_intr_handler(int irq, void *dev_id) QLAFX00_CLR_INTR_REG(ha, clr_intr); QLAFX00_RD_INTR_REG(ha); } + + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } return IRQ_HANDLED; } diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 10754f518303..cce0cd0d7ec4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ 
b/drivers/scsi/qla2xxx/qla_nx.c @@ -2074,9 +2074,6 @@ qla82xx_intr_handler(int irq, void *dev_id) } WRT_REG_DWORD(®->host_int, 0); } - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (!ha->flags.msi_enabled) - qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) @@ -2085,11 +2082,12 @@ qla82xx_intr_handler(int irq, void *dev_id) status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (!ha->flags.msi_enabled) + qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); + return IRQ_HANDLED; } @@ -2149,8 +2147,6 @@ qla82xx_msix_default(int irq, void *dev_id) WRT_REG_DWORD(®->host_int, 0); } while (0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) ql_log(ql_log_warn, vha, 0x5044, @@ -2158,11 +2154,9 @@ qla82xx_msix_default(int irq, void *dev_id) status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return IRQ_HANDLED; } @@ -3345,7 +3339,7 @@ void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) ha->flags.mbox_busy = 0; ql_log(ql_log_warn, vha, 0x6010, "Doing premature completion of mbx command.\n"); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) + if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); } } diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7a3870f385f6..66b0b26a1381 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -688,8 +688,12 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen * for qla_tgt_xmit_response LLD code */ + if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; + se_cmd->residual_count = 0; + } se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; - se_cmd->residual_count = se_cmd->data_length; + se_cmd->residual_count += se_cmd->data_length; cmd->bufflen = 0; } diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index c735c5a008a2..6427600b5bbe 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, int ret; sg_free_table(sgt); - ret = sg_alloc_table(sgt, nents, GFP_KERNEL); + ret = sg_alloc_table(sgt, nents, GFP_ATOMIC); if (ret) return ret; } diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index f5d84d6f8222..48b396fced0a 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) return NULL; - pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, "failed to allocate memory for platform data\n"); diff --git a/drivers/spi/spi-s3c64xx.c 
b/drivers/spi/spi-s3c64xx.c index 5000586cb98d..71cc3e6ef47c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) } ret = pm_runtime_get_sync(&sdd->pdev->dev); - if (ret != 0) { + if (ret < 0) { dev_err(dev, "Failed to enable device: %d\n", ret); goto out_tx; } diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig index 2e4a28b018e8..12f321dd2399 100644 --- a/drivers/staging/media/davinci_vpfe/Kconfig +++ b/drivers/staging/media/davinci_vpfe/Kconfig @@ -1,6 +1,6 @@ config VIDEO_DM365_VPFE tristate "DM365 VPFE Media Controller Capture Driver" - depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE + depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF select VIDEOBUF2_DMA_CONTIG help Support for DM365 VPFE based Media Controller Capture driver. diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c index b88e1ddce229..d8ce20d2fbda 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c @@ -639,7 +639,8 @@ static int vpfe_probe(struct platform_device *pdev) if (ret) goto probe_free_dev_mem; - if (vpfe_initialize_modules(vpfe_dev, pdev)) + ret = vpfe_initialize_modules(vpfe_dev, pdev); + if (ret) goto probe_disable_clock; vpfe_dev->media_dev.dev = vpfe_dev->pdev; @@ -663,7 +664,8 @@ static int vpfe_probe(struct platform_device *pdev) /* set the driver data in platform device */ platform_set_drvdata(pdev, vpfe_dev); /* register subdevs/entities */ - if (vpfe_register_entities(vpfe_dev)) + ret = vpfe_register_entities(vpfe_dev); + if (ret) goto probe_out_v4l2_unregister; ret = vpfe_attach_irq(vpfe_dev); diff --git a/drivers/staging/media/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig index df6569b997b8..34f3b6d02d2a 100644 --- a/drivers/staging/media/solo6x10/Kconfig +++ b/drivers/staging/media/solo6x10/Kconfig @@ -5,6 +5,7 @@ config SOLO6X10 select VIDEOBUF2_DMA_SG select VIDEOBUF2_DMA_CONTIG select SND_PCM + select FONT_8x16 ---help--- This driver supports the Softlogic based MPEG-4 and h.264 codec cards. 
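For context on the spi-s3c64xx hunk above: pm_runtime_get_sync() may legitimately return a positive value (for instance when the device is already resumed), so treating any non-zero return as an error, as the old code did, caused spurious failures. Below is a minimal sketch of the conventional check, not taken from the patch; the names example_prepare and dev are hypothetical, only the pm_runtime calls are the real API.

	/*
	 * Illustrative sketch only: how a driver typically checks
	 * pm_runtime_get_sync().  It returns a negative errno on failure,
	 * and 0 or a positive value (e.g. 1 if already active) on success,
	 * hence the "ret < 0" test.
	 */
	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static int example_prepare(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* the usage count was still bumped; balance it */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		return 0;	/* 0 or positive both mean the device is powered */
	}

The pm_runtime_put_noidle() in the error path is a common way to balance the usage count that pm_runtime_get_sync() takes even when the resume fails.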
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 13e9e715ad2e..8d8b3ff68490 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser( struct iscsi_tpg_np *tpg_np_iser = NULL; char *endptr; u32 op; - int rc; + int rc = 0; op = simple_strtoul(page, &endptr, 0); if ((op != 1) && (op != 0)) { @@ -174,31 +174,32 @@ static ssize_t lio_target_np_store_iser( return -EINVAL; if (op) { - int rc = request_module("ib_isert"); - if (rc != 0) + rc = request_module("ib_isert"); + if (rc != 0) { pr_warn("Unable to request_module for ib_isert\n"); + rc = 0; + } tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, np->np_ip, tpg_np, ISCSI_INFINIBAND); - if (!tpg_np_iser || IS_ERR(tpg_np_iser)) + if (IS_ERR(tpg_np_iser)) { + rc = PTR_ERR(tpg_np_iser); goto out; + } } else { tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); - if (!tpg_np_iser) - goto out; - - rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); - if (rc < 0) - goto out; + if (tpg_np_iser) { + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); + if (rc < 0) + goto out; + } } - printk("lio_target_np_store_iser() done, op: %d\n", op); - iscsit_put_tpg(tpg); return count; out: iscsit_put_tpg(tpg); - return -EINVAL; + return rc; } TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 8e6298cc8839..dcb199da06b9 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) return 0; sess->time2retain_timer_flags |= ISCSI_TF_STOP; - spin_unlock_bh(&se_tpg->session_lock); + spin_unlock(&se_tpg->session_lock); del_timer_sync(&sess->time2retain_timer); - spin_lock_bh(&se_tpg->session_lock); + spin_lock(&se_tpg->session_lock); sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; pr_debug("Stopped Time2Retain Timer for SID: %u\n", sess->sid); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bb5d5c5bce65..3402241be87c 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -984,8 +984,6 @@ int iscsi_target_setup_login_socket( } np->np_transport = t; - printk("Set np->np_transport to %p -> %s\n", np->np_transport, - np->np_transport->name); return 0; } @@ -1002,7 +1000,6 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) conn->sock = new_sock; conn->login_family = np->np_sockaddr.ss_family; - printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock); if (np->np_sockaddr.ss_family == AF_INET6) { memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7ad912060e21..cd5018ff9cd7 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -721,9 +721,6 @@ int iscsi_target_locate_portal( start += strlen(key) + strlen(value) + 2; } - - printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf); - /* * See 5.3. Login Phase. 
*/ diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 59bfaecc4e14..abfd99089781 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -244,14 +244,9 @@ static void pty_flush_buffer(struct tty_struct *tty) static int pty_open(struct tty_struct *tty, struct file *filp) { - int retval = -ENODEV; - if (!tty || !tty->link) - goto out; - - set_bit(TTY_IO_ERROR, &tty->flags); + return -ENODEV; - retval = -EIO; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) goto out; if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) @@ -262,9 +257,11 @@ static int pty_open(struct tty_struct *tty, struct file *filp) clear_bit(TTY_IO_ERROR, &tty->flags); clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); - retval = 0; + return 0; + out: - return retval; + set_bit(TTY_IO_ERROR, &tty->flags); + return -EIO; } static void pty_set_termios(struct tty_struct *tty, diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 097dff9c08ad..bb91b4713ebd 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -30,6 +30,12 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; +#ifdef CONFIG_64BIT + extern int iosapic_serial_irq(int cellnum); + if (!dev->irq && (dev->id.sversion == 0xad)) + dev->irq = iosapic_serial_irq(dev->mod_index-1); +#endif + if (!dev->irq) { /* We find some unattached serial ports by walking native * busses. These should be silently ignored. Otherwise, @@ -51,7 +57,8 @@ static int __init serial_init_chip(struct parisc_device *dev) memset(&uart, 0, sizeof(uart)); uart.port.iotype = UPIO_MEM; /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ - uart.port.uartclk = 7272727; + uart.port.uartclk = (dev->id.sversion != 0xad) ? + 7272727 : 1843200; uart.port.mapbase = address; uart.port.membase = ioremap_nocache(address, 16); uart.port.irq = dev->irq; @@ -73,6 +80,7 @@ static struct parisc_device_id serial_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad }, { 0 } }; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index fc2c06c66e89..2bd78e2ac8ec 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -289,13 +289,10 @@ static int vt_disallocate(unsigned int vc_num) struct vc_data *vc = NULL; int ret = 0; - if (!vc_num) - return 0; - console_lock(); if (VT_BUSY(vc_num)) ret = -EBUSY; - else + else if (vc_num) vc = vc_deallocate(vc_num); console_unlock(); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 7ef3eb8617a6..2311b1e4e43c 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -4,11 +4,17 @@ menuconfig USB_PHY bool "USB Physical Layer drivers" help - USB controllers (those which are host, device or DRD) need a - device to handle the physical layer signalling, commonly called - a PHY. + Most USB controllers have the physical layer signalling part + (commonly called a PHY) built in. However, dual-role devices + (a.k.a. USB on-the-go) which support being USB master or slave + with the same connector often use an external PHY. - The following drivers add support for such PHY devices. + The drivers in this submenu add support for such PHY devices. + They are not needed for standard master-only (or the vast + majority of slave-only) USB interfaces. 
+ + If you're not sure if this applies to you, it probably doesn't; + say N here. if USB_PHY diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index c92c5ed4e580..e581c2549a57 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -172,7 +172,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, }; diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index b353e7e3d480..4a2423e84d55 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h @@ -52,7 +52,9 @@ /* Abbott Diabetics vendor and product ids */ #define ABBOTT_VENDOR_ID 0x1a61 -#define ABBOTT_PRODUCT_ID 0x3410 +#define ABBOTT_STEREO_PLUG_ID 0x3410 +#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID +#define ABBOTT_STRIP_PORT_ID 0x3420 /* Commands */ #define TI_GET_VERSION 0x01 |
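The ti_usb_3410_5052 change above is a pure device-ID addition: once the new ABBOTT_STRIP_PORT_ID entry is present in the driver's usb_device_id table, the USB core can match and bind the existing driver with no other code changes. As a minimal sketch of that table shape (the EXAMPLE_* names and the example_id_table identifier are hypothetical, not part of the patch):

	/*
	 * Illustrative sketch only: the general shape of a USB device-ID
	 * table.  The EXAMPLE_* values mirror the kind of defines added in
	 * ti_usb_3410_5052.h above but are placeholders here.
	 */
	#include <linux/module.h>
	#include <linux/usb.h>

	#define EXAMPLE_VENDOR_ID	0x1a61
	#define EXAMPLE_PRODUCT_ID	0x3420

	static const struct usb_device_id example_id_table[] = {
		{ USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID) },
		{ }			/* terminating all-zero entry */
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);

MODULE_DEVICE_TABLE() exports the IDs so hotplug tooling can autoload the module when a matching device is plugged in, which is why ID-only patches like this one need nothing beyond the table and header updates.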