author | Ingo Molnar <mingo@kernel.org> | 2013-08-14 12:14:12 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2013-08-14 12:14:12 +0200
commit | 397f09977ea350a20f20b415a9313cc790137742
tree | 0e44f2f3e5804bd5c0c6f644785006b7225e3c94 /drivers
parent | 6356bb0ad6525dae93c06478a098ed3848e9ab01
parent | a4b4bedce880046feeb5b206392960f395ed02ad
Merge tag 'amd_f15_m30' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/ras
Pull AMD F15h, model 0x30 and later enablement work, more specifically EDAC
support, from Borislav Petkov.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers')
76 files changed, 1077 insertions, 513 deletions
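
For orientation before the raw diff: the EDAC part of this pull caches the CPU family, model and stepping in the driver's private data and dispatches to a new F15h M30h family entry when the model is 0x30, instead of keying checks off boot_cpu_data. The following is a minimal, stand-alone sketch of that dispatch shape; the struct, table and helper names here are simplified stand-ins for the driver's amd64_family_types[] and amd64_per_family_init(), not the actual kernel code.

/*
 * Condensed illustration of the per-family dispatch added by this merge.
 * Simplified stand-ins only; the real driver also wires up PCI IDs and
 * per-family ops for each entry.
 */
#include <stdio.h>

enum amd_families { K8_CPUS, F10_CPUS, F15_CPUS, F15_M30H_CPUS, F16_CPUS, NUM_FAMILIES };

struct family_type {
	const char *ctl_name;
};

static const struct family_type family_types[NUM_FAMILIES] = {
	[K8_CPUS]       = { .ctl_name = "K8"        },
	[F10_CPUS]      = { .ctl_name = "F10h"      },
	[F15_CPUS]      = { .ctl_name = "F15h"      },
	[F15_M30H_CPUS] = { .ctl_name = "F15h_M30h" },
	[F16_CPUS]      = { .ctl_name = "F16h"      },
};

/* Mirrors the shape of amd64_per_family_init() after the patch. */
static const struct family_type *per_family_init(unsigned char fam,
						 unsigned char model)
{
	switch (fam) {
	case 0xf:
		return &family_types[K8_CPUS];
	case 0x10:
		return &family_types[F10_CPUS];
	case 0x15:
		/* New in this merge: model 0x30 gets its own family entry. */
		if (model == 0x30)
			return &family_types[F15_M30H_CPUS];
		return &family_types[F15_CPUS];
	case 0x16:
		return &family_types[F16_CPUS];
	default:
		return NULL;
	}
}

int main(void)
{
	const struct family_type *ft = per_family_init(0x15, 0x30);

	printf("detected: %s\n", ft ? ft->ctl_name : "unsupported");
	return 0;
}

The same cached fam/model/stepping fields replace the boot_cpu_data checks throughout the amd64_edac hunks below.
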
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index fd6c51cc3acb..5a74a9c1e42c 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device) /* Clean up. */ per_cpu(processor_device_array, pr->id) = NULL; per_cpu(processors, pr->id) = NULL; - try_offline_node(cpu_to_node(pr->id)); /* Remove the CPU. */ get_online_cpus(); @@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device) acpi_unmap_lsapic(pr->id); put_online_cpus(); + try_offline_node(cpu_to_node(pr->id)); + out: free_cpumask_var(pr->throttling.shared_cpu_map); kfree(pr); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index f68095756fb7..408f6b2a5fa8 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); #define PHYSICAL_NODE_STRING "physical_node" +#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10) int register_acpi_bus_type(struct acpi_bus_type *type) { @@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) return ret; } -static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, - void *addr_p, void **ret_p) +static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used, + void *not_used, void **ret_p) { - unsigned long long addr, sta; - acpi_status status; + struct acpi_device *adev = NULL; - status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); - if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) { + acpi_bus_get_device(handle, &adev); + if (adev) { *ret_p = handle; - status = acpi_bus_get_status_handle(handle, &sta); - if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED)) - return AE_CTRL_TERMINATE; + return AE_CTRL_TERMINATE; } return AE_OK; } -acpi_handle acpi_get_child(acpi_handle parent, u64 address) +static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge) { - void *ret = NULL; + unsigned long long sta; + acpi_status status; + + status = acpi_bus_get_status_handle(handle, &sta); + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) + return false; + + if (is_bridge) { + void *test = NULL; + + /* Check if this object has at least one child device. */ + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + acpi_dev_present, NULL, NULL, &test); + return !!test; + } + return true; +} + +struct find_child_context { + u64 addr; + bool is_bridge; + acpi_handle ret; + bool ret_checked; +}; + +static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used, + void *data, void **not_used) +{ + struct find_child_context *context = data; + unsigned long long addr; + acpi_status status; - if (!parent) - return NULL; + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); + if (ACPI_FAILURE(status) || addr != context->addr) + return AE_OK; - acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL, - do_acpi_find_child, &address, &ret); - return (acpi_handle)ret; + if (!context->ret) { + /* This is the first matching object. Save its handle. */ + context->ret = handle; + return AE_OK; + } + /* + * There is more than one matching object with the same _ADR value. + * That really is unexpected, so we are kind of beyond the scope of the + * spec here. We have to choose which one to return, though. + * + * First, check if the previously found object is good enough and return + * its handle if so. 
Second, check the same for the object that we've + * just found. + */ + if (!context->ret_checked) { + if (acpi_extra_checks_passed(context->ret, context->is_bridge)) + return AE_CTRL_TERMINATE; + else + context->ret_checked = true; + } + if (acpi_extra_checks_passed(handle, context->is_bridge)) { + context->ret = handle; + return AE_CTRL_TERMINATE; + } + return AE_OK; } -EXPORT_SYMBOL(acpi_get_child); + +acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge) +{ + if (parent) { + struct find_child_context context = { + .addr = addr, + .is_bridge = is_bridge, + }; + + acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child, + NULL, &context, NULL); + return context.ret; + } + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_find_child); int acpi_bind_one(struct device *dev, acpi_handle handle) { struct acpi_device *acpi_dev; acpi_status status; struct acpi_device_physical_node *physical_node, *pn; - char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; + char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; + struct list_head *physnode_list; + unsigned int node_id; int retval = -EINVAL; if (ACPI_HANDLE(dev)) { @@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) mutex_lock(&acpi_dev->physical_node_lock); - /* Sanity check. */ - list_for_each_entry(pn, &acpi_dev->physical_node_list, node) + /* + * Keep the list sorted by node_id so that the IDs of removed nodes can + * be recycled easily. + */ + physnode_list = &acpi_dev->physical_node_list; + node_id = 0; + list_for_each_entry(pn, &acpi_dev->physical_node_list, node) { + /* Sanity check. */ if (pn->dev == dev) { dev_warn(dev, "Already associated with ACPI node\n"); goto err_free; } - - /* allocate physical node id according to physical_node_id_bitmap */ - physical_node->node_id = - find_first_zero_bit(acpi_dev->physical_node_id_bitmap, - ACPI_MAX_PHYSICAL_NODE); - if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { - retval = -ENOSPC; - goto err_free; + if (pn->node_id == node_id) { + physnode_list = &pn->node; + node_id++; + } } - set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); + physical_node->node_id = node_id; physical_node->dev = dev; - list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); + list_add(&physical_node->node, physnode_list); acpi_dev->physical_node_count++; mutex_unlock(&acpi_dev->physical_node_lock); @@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev) mutex_lock(&acpi_dev->physical_node_lock); list_for_each_safe(node, next, &acpi_dev->physical_node_list) { - char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; + char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; entry = list_entry(node, struct acpi_device_physical_node, node); @@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev) continue; list_del(node); - clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap); acpi_dev->physical_node_count--; diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index aa1227a7e3f2..04a13784dd20 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) dev->pnp.bus_id, (u32) dev->wakeup.sleep_state); + mutex_lock(&dev->physical_node_lock); + if (!dev->physical_node_count) { seq_printf(seq, "%c%-8s\n", dev->wakeup.flags.run_wake ? 
'*' : ' ', @@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) put_device(ldev); } } + + mutex_unlock(&dev->physical_node_lock); } mutex_unlock(&acpi_device_lock); return 0; @@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev) { struct acpi_device_physical_node *entry; + mutex_lock(&adev->physical_node_lock); + list_for_each_entry(entry, &adev->physical_node_list, node) if (entry->dev && device_can_wakeup(entry->dev)) { bool enable = !device_may_wakeup(entry->dev); device_set_wakeup_enable(entry->dev, enable); } + + mutex_unlock(&adev->physical_node_lock); } static ssize_t diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 0ec434d2586d..e1284b8dc6ee 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, * Some systems always report current brightness level as maximum * through _BQC, we need to test another value for them. */ - test_level = current_level == max_level ? br->levels[2] : max_level; + test_level = current_level == max_level ? br->levels[3] : max_level; result = acpi_video_device_lcd_set_level(device, test_level); if (result) diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c index 4ec7c04b3f82..26386f0b89a8 100644 --- a/drivers/ata/pata_imx.c +++ b/drivers/ata/pata_imx.c @@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx_pata_dt_ids); static struct platform_driver pata_imx_driver = { .probe = pata_imx_probe, diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index e69102696533..3455f833e473 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block, } } - return regcache_sync_block_raw_flush(map, &data, base, regtmp); + return regcache_sync_block_raw_flush(map, &data, base, regtmp + + map->reg_stride); } int regcache_sync_block(struct regmap *map, void *block, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 1b456fe9b87a..fc45567ad3ac 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, unsigned long flags; spin_lock_irqsave(&portdev->ports_lock, flags); - list_for_each_entry(port, &portdev->ports, list) - if (port->cdev->dev == dev) + list_for_each_entry(port, &portdev->ports, list) { + if (port->cdev->dev == dev) { + kref_get(&port->kref); goto out; + } + } port = NULL; out: spin_unlock_irqrestore(&portdev->ports_lock, flags); @@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, port = filp->private_data; + /* Port is hot-unplugged. */ + if (!port->guest_connected) + return -ENODEV; + if (!port_has_data(port)) { /* * If nothing's connected on the host just return 0 in @@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, if (ret < 0) return ret; } - /* Port got hot-unplugged. */ + /* Port got hot-unplugged while we were waiting above. */ if (!port->guest_connected) return -ENODEV; /* @@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, if (is_rproc_serial(port->out_vq->vdev)) return -EINVAL; + /* + * pipe->nrbufs == 0 means there are no data to transfer, + * so this returns just 0 for no data. 
+ */ + pipe_lock(pipe); + if (!pipe->nrbufs) { + ret = 0; + goto error_out; + } + ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); if (ret < 0) - return ret; + goto error_out; buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); - if (!buf) - return -ENOMEM; + if (!buf) { + ret = -ENOMEM; + goto error_out; + } sgl.n = 0; sgl.len = 0; @@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, sgl.sg = buf->sg; sg_init_table(sgl.sg, sgl.size); ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); + pipe_unlock(pipe); if (likely(ret > 0)) ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); if (unlikely(ret <= 0)) free_buf(buf, true); return ret; + +error_out: + pipe_unlock(pipe); + return ret; } static unsigned int port_fops_poll(struct file *filp, poll_table *wait) @@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp) struct port *port; int ret; + /* We get the port with a kref here */ port = find_port_by_devt(cdev->dev); + if (!port) { + /* Port was unplugged before we could proceed */ + return -ENXIO; + } filp->private_data = port; - /* Prevent against a port getting hot-unplugged at the same time */ - spin_lock_irq(&port->portdev->ports_lock); - kref_get(&port->kref); - spin_unlock_irq(&port->portdev->ports_lock); - /* * Don't allow opening of console port devices -- that's done * via /dev/hvc @@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref) port = container_of(kref, struct port, kref); - sysfs_remove_group(&port->dev->kobj, &port_attribute_group); - device_destroy(pdrvdata.class, port->dev->devt); - cdev_del(port->cdev); - - kfree(port->name); - - debugfs_remove(port->debugfs_file); - kfree(port); } @@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port) spin_unlock_irq(&port->portdev->ports_lock); if (port->guest_connected) { + /* Let the app know the port is going down. */ + send_sigio_to_port(port); + + /* Do this after sigio is actually sent */ port->guest_connected = false; port->host_connected = false; - wake_up_interruptible(&port->waitqueue); - /* Let the app know the port is going down. 
*/ - send_sigio_to_port(port); + wake_up_interruptible(&port->waitqueue); } if (is_console_port(port)) { @@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port) */ port->portdev = NULL; + sysfs_remove_group(&port->dev->kobj, &port_attribute_group); + device_destroy(pdrvdata.class, port->dev->devt); + cdev_del(port->cdev); + + kfree(port->name); + + debugfs_remove(port->debugfs_file); + /* * Locks around here are not necessary - a port can't be * opened after we removed the port struct from ports_list diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 0ceb2eff5a7e..f97cb3d8c5a2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, return count; } -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, - size_t count) +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, + const char *buf, size_t count) { struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input, j; @@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, if (input > 1) input = 1; - if (input == cs_tuners->ignore_nice) /* nothing to do */ + if (input == cs_tuners->ignore_nice_load) /* nothing to do */ return count; - cs_tuners->ignore_nice = input; + cs_tuners->ignore_nice_load = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, dbs_info = &per_cpu(cs_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall, 0); - if (cs_tuners->ignore_nice) + if (cs_tuners->ignore_nice_load) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } @@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate); show_store_one(cs, sampling_down_factor); show_store_one(cs, up_threshold); show_store_one(cs, down_threshold); -show_store_one(cs, ignore_nice); +show_store_one(cs, ignore_nice_load); show_store_one(cs, freq_step); declare_show_sampling_rate_min(cs); @@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate); gov_sys_pol_attr_rw(sampling_down_factor); gov_sys_pol_attr_rw(up_threshold); gov_sys_pol_attr_rw(down_threshold); -gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(ignore_nice_load); gov_sys_pol_attr_rw(freq_step); gov_sys_pol_attr_ro(sampling_rate_min); @@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { &sampling_down_factor_gov_sys.attr, &up_threshold_gov_sys.attr, &down_threshold_gov_sys.attr, - &ignore_nice_gov_sys.attr, + &ignore_nice_load_gov_sys.attr, &freq_step_gov_sys.attr, NULL }; @@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { &sampling_down_factor_gov_pol.attr, &up_threshold_gov_pol.attr, &down_threshold_gov_pol.attr, - &ignore_nice_gov_pol.attr, + &ignore_nice_load_gov_pol.attr, &freq_step_gov_pol.attr, NULL }; @@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data) tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; - tuners->ignore_nice = 0; + tuners->ignore_nice_load = 0; tuners->freq_step = DEF_FREQUENCY_STEP; dbs_data->tuners = tuners; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 7b839a8db2a7..e59afaa9da23 100644 --- 
a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) unsigned int j; if (dbs_data->cdata->governor == GOV_ONDEMAND) - ignore_nice = od_tuners->ignore_nice; + ignore_nice = od_tuners->ignore_nice_load; else - ignore_nice = cs_tuners->ignore_nice; + ignore_nice = cs_tuners->ignore_nice_load; policy = cdbs->cur_policy; @@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, cs_tuners = dbs_data->tuners; cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = cs_tuners->sampling_rate; - ignore_nice = cs_tuners->ignore_nice; + ignore_nice = cs_tuners->ignore_nice_load; } else { od_tuners = dbs_data->tuners; od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = od_tuners->sampling_rate; - ignore_nice = od_tuners->ignore_nice; + ignore_nice = od_tuners->ignore_nice_load; od_ops = dbs_data->cdata->gov_ops; io_busy = od_tuners->io_is_busy; } diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 6663ec3b3056..d5f12b4b11b8 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s { /* Per policy Governers sysfs tunables */ struct od_dbs_tuners { - unsigned int ignore_nice; + unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; @@ -175,7 +175,7 @@ struct od_dbs_tuners { }; struct cs_dbs_tuners { - unsigned int ignore_nice; + unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 93eb5cbcc1f6..c087347d6688 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, return count; } -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, - size_t count) +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, + const char *buf, size_t count) { struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; @@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, if (input > 1) input = 1; - if (input == od_tuners->ignore_nice) { /* nothing to do */ + if (input == od_tuners->ignore_nice_load) { /* nothing to do */ return count; } - od_tuners->ignore_nice = input; + od_tuners->ignore_nice_load = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); - if (od_tuners->ignore_nice) + if (od_tuners->ignore_nice_load) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; @@ -461,7 +461,7 @@ show_store_one(od, sampling_rate); show_store_one(od, io_is_busy); show_store_one(od, up_threshold); show_store_one(od, sampling_down_factor); -show_store_one(od, ignore_nice); +show_store_one(od, ignore_nice_load); show_store_one(od, powersave_bias); declare_show_sampling_rate_min(od); @@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate); gov_sys_pol_attr_rw(io_is_busy); gov_sys_pol_attr_rw(up_threshold); gov_sys_pol_attr_rw(sampling_down_factor); 
-gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(ignore_nice_load); gov_sys_pol_attr_rw(powersave_bias); gov_sys_pol_attr_ro(sampling_rate_min); @@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { &sampling_rate_gov_sys.attr, &up_threshold_gov_sys.attr, &sampling_down_factor_gov_sys.attr, - &ignore_nice_gov_sys.attr, + &ignore_nice_load_gov_sys.attr, &powersave_bias_gov_sys.attr, &io_is_busy_gov_sys.attr, NULL @@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { &sampling_rate_gov_pol.attr, &up_threshold_gov_pol.attr, &sampling_down_factor_gov_pol.attr, - &ignore_nice_gov_pol.attr, + &ignore_nice_load_gov_pol.attr, &powersave_bias_gov_pol.attr, &io_is_busy_gov_pol.attr, NULL @@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data) } tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; - tuners->ignore_nice = 0; + tuners->ignore_nice_load = 0; tuners->powersave_bias = default_powersave_bias; tuners->io_is_busy = should_io_be_busy(); diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index bb838b985077..9536852c504a 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) clk_put(cpuclk); return -EINVAL; } - ret = clk_set_rate(cpuclk, rate); - if (ret) { - clk_put(cpuclk); - return ret; - } /* clock table init */ for (i = 2; @@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) i++) loongson2_clockmod_table[i].frequency = (rate * i) / 8; + ret = clk_set_rate(cpuclk, rate); + if (ret) { + clk_put(cpuclk); + return ret; + } + policy->cur = loongson2_cpufreq_get(policy->cpu); cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index b67f45f5c271..5039fbc88254 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c @@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); - return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << - sh_chan->xmit_shift; + return sh_desc->hw.tcr - + (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift); } /* Called from error IRQ or NMI */ diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 8b6a0343c220..d6c0c6590607 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -123,7 +123,7 @@ static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct) u32 reg = 0; amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); - reg &= 0xfffffffe; + reg &= (pvt->model >= 0x30) ? ~3 : ~1; reg |= dct; amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); } @@ -133,8 +133,9 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, { u8 dct = 0; + /* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */ if (addr >= 0x140 && addr <= 0x1a0) { - dct = 1; + dct = (pvt->model >= 0x30) ? 
3 : 1; addr -= 0x100; } @@ -202,11 +203,11 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) struct amd64_pvt *pvt = mci->pvt_info; u32 min_scrubrate = 0x5; - if (boot_cpu_data.x86 == 0xf) + if (pvt->fam == 0xf) min_scrubrate = 0x0; - /* F15h Erratum #505 */ - if (boot_cpu_data.x86 == 0x15) + /* Erratum #505 for F15h Model 0x00 - Model 0x01, Stepping 0 */ + if (pvt->fam == 0x15 && pvt->model <= 0x01 && pvt->stepping < 0x1) f15h_select_dct(pvt, 0); return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); @@ -218,8 +219,8 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci) u32 scrubval = 0; int i, retval = -EINVAL; - /* F15h Erratum #505 */ - if (boot_cpu_data.x86 == 0x15) + /* Erratum #505 for F15h Model 0x00 - Model 0x01, Stepping 0 */ + if (pvt->fam == 0x15 && pvt->model <= 0x01 && pvt->stepping < 0x1) f15h_select_dct(pvt, 0); amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); @@ -335,7 +336,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, u64 csbase, csmask, base_bits, mask_bits; u8 addr_shift; - if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { + if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { csbase = pvt->csels[dct].csbases[csrow]; csmask = pvt->csels[dct].csmasks[csrow]; base_bits = GENMASK(21, 31) | GENMASK(9, 15); @@ -343,10 +344,11 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, addr_shift = 4; /* - * F16h needs two addr_shift values: 8 for high and 6 for low - * (cf. F16h BKDG). - */ - } else if (boot_cpu_data.x86 == 0x16) { + * F16h and F15h, models 30h and later need two addr_shift values: + * 8 for high and 6 for low (cf. F16h BKDG). + */ + } else if (pvt->fam == 0x16 || + (pvt->fam == 0x15 && pvt->model >= 0x30)) { csbase = pvt->csels[dct].csbases[csrow]; csmask = pvt->csels[dct].csmasks[csrow >> 1]; @@ -367,7 +369,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, csmask = pvt->csels[dct].csmasks[csrow >> 1]; addr_shift = 8; - if (boot_cpu_data.x86 == 0x15) + if (pvt->fam == 0x15) base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13); else base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13); @@ -447,14 +449,14 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, struct amd64_pvt *pvt = mci->pvt_info; /* only revE and later have the DRAM Hole Address Register */ - if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { + if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) { edac_dbg(1, " revision %d for node %d does not support DHAR\n", pvt->ext_model, pvt->mc_node_id); return 1; } /* valid for Fam10h and above */ - if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { + if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) { edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n"); return 1; } @@ -486,10 +488,8 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, *hole_base = dhar_base(pvt); *hole_size = (1ULL << 32) - *hole_base; - if (boot_cpu_data.x86 > 0xf) - *hole_offset = f10_dhar_offset(pvt); - else - *hole_offset = k8_dhar_offset(pvt); + *hole_offset = (pvt->fam > 0xf) ? 
f10_dhar_offset(pvt) + : k8_dhar_offset(pvt); edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", pvt->mc_node_id, (unsigned long)*hole_base, @@ -663,7 +663,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) u8 bit; unsigned long edac_cap = EDAC_FLAG_NONE; - bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) + bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F) ? 19 : 17; @@ -675,7 +675,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); -static void amd64_dump_dramcfg_low(u32 dclr, int chan) +static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) { edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); @@ -686,7 +686,7 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) edac_dbg(1, " PAR/ERR parity: %s\n", (dclr & BIT(8)) ? "enabled" : "disabled"); - if (boot_cpu_data.x86 == 0x10) + if (pvt->fam == 0x10) edac_dbg(1, " DCT 128bit mode width: %s\n", (dclr & BIT(11)) ? "128b" : "64b"); @@ -709,21 +709,21 @@ static void dump_misc_regs(struct amd64_pvt *pvt) (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); - amd64_dump_dramcfg_low(pvt->dclr0, 0); + amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0); edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n", pvt->dhar, dhar_base(pvt), - (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) - : f10_dhar_offset(pvt)); + (pvt->fam == 0xf) ? k8_dhar_offset(pvt) + : f10_dhar_offset(pvt)); edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); amd64_debug_display_dimm_sizes(pvt, 0); /* everything below this point is Fam10h and above */ - if (boot_cpu_data.x86 == 0xf) + if (pvt->fam == 0xf) return; amd64_debug_display_dimm_sizes(pvt, 1); @@ -732,17 +732,20 @@ static void dump_misc_regs(struct amd64_pvt *pvt) /* Only if NOT ganged does dclr1 have valid info */ if (!dct_ganging_enabled(pvt)) - amd64_dump_dramcfg_low(pvt->dclr1, 1); + amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1); } /* - * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60] + * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60] */ static void prep_chip_selects(struct amd64_pvt *pvt) { - if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { + if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; + } else if (pvt->fam == 0x15 && pvt->model >= 0x30) { + pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4; + pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2; } else { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; @@ -768,7 +771,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", cs, *base0, reg0); - if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) + if (pvt->fam == 0xf || dct_ganging_enabled(pvt)) continue; if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) @@ -786,7 +789,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", cs, *mask0, reg0); - if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) + if (pvt->fam == 0xf || dct_ganging_enabled(pvt)) continue; if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) @@ -800,9 +803,9 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) enum mem_type type; /* F15h supports only DDR3 */ - if 
(boot_cpu_data.x86 >= 0x15) + if (pvt->fam >= 0x15) type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; - else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) { + else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) { if (pvt->dchr0 & DDR3_MODE) type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; else @@ -835,14 +838,13 @@ static int k8_early_channel_count(struct amd64_pvt *pvt) } /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ -static u64 get_error_address(struct mce *m) +static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m) { - struct cpuinfo_x86 *c = &boot_cpu_data; u64 addr; u8 start_bit = 1; u8 end_bit = 47; - if (c->x86 == 0xf) { + if (pvt->fam == 0xf) { start_bit = 3; end_bit = 39; } @@ -852,7 +854,7 @@ static u64 get_error_address(struct mce *m) /* * Erratum 637 workaround */ - if (c->x86 == 0x15) { + if (pvt->fam == 0x15) { struct amd64_pvt *pvt; u64 cc6_base, tmp_addr; u32 tmp; @@ -916,15 +918,15 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor, static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) { struct amd_northbridge *nb; - struct pci_dev *misc, *f1 = NULL; - struct cpuinfo_x86 *c = &boot_cpu_data; + struct pci_dev *f1 = NULL; + unsigned int pci_func; int off = range << 3; u32 llim; amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); - if (c->x86 == 0xf) + if (pvt->fam == 0xf) return; if (!dram_rw(pvt, range)) @@ -934,15 +936,17 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); /* F15h: factor in CC6 save area by reading dst node's limit reg */ - if (c->x86 != 0x15) + if (pvt->fam != 0x15) return; nb = node_to_amd_nb(dram_dst_node(pvt, range)); if (WARN_ON(!nb)) return; - misc = nb->misc; - f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc); + pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 + : PCI_DEVICE_ID_AMD_15H_NB_F1; + + f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc); if (WARN_ON(!f1)) return; @@ -1089,7 +1093,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt) int i, j, channels = 0; /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ - if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128)) + if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128)) return 2; /* @@ -1173,7 +1177,7 @@ static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, } /* - * F16h has only limited cs_modes + * F16h and F15h model 30h have only limited cs_modes. */ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode) @@ -1190,7 +1194,7 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, static void read_dram_ctl_register(struct amd64_pvt *pvt) { - if (boot_cpu_data.x86 == 0xf) + if (pvt->fam == 0xf) return; if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { @@ -1218,6 +1222,29 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt) } /* + * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG, + * 2.10.12 Memory Interleaving Modes). 
+ */ +static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, + u8 intlv_en, int num_dcts_intlv, + u32 dct_sel) +{ + u8 channel = 0; + u8 select; + + if (!(intlv_en)) + return (u8)(dct_sel); + + if (num_dcts_intlv == 2) { + select = (sys_addr >> 8) & 0x3; + channel = select ? 0x3 : 0; + } else if (num_dcts_intlv == 4) + channel = (sys_addr >> 8) & 0x7; + + return channel; +} + +/* * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory * Interleaving Modes. */ @@ -1366,6 +1393,10 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct) (in_addr & cs_mask), (cs_base & cs_mask)); if ((in_addr & cs_mask) == (cs_base & cs_mask)) { + if (pvt->fam == 0x15 && pvt->model >= 0x30) { + cs_found = csrow; + break; + } cs_found = f10_process_possible_spare(pvt, dct, csrow); edac_dbg(1, " MATCH csrow=%d\n", cs_found); @@ -1384,11 +1415,9 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) { u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; - if (boot_cpu_data.x86 == 0x10) { + if (pvt->fam == 0x10) { /* only revC3 and revE have that feature */ - if (boot_cpu_data.x86_model < 4 || - (boot_cpu_data.x86_model < 0xa && - boot_cpu_data.x86_mask < 3)) + if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3)) return sys_addr; } @@ -1492,20 +1521,142 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, return cs_found; } -static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, - int *chan_sel) +static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range, + u64 sys_addr, int *chan_sel) +{ + int cs_found = -EINVAL; + int num_dcts_intlv = 0; + u64 chan_addr, chan_offset; + u64 dct_base, dct_limit; + u32 dct_cont_base_reg, dct_cont_limit_reg, tmp; + u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en; + + u64 dhar_offset = f10_dhar_offset(pvt); + u8 intlv_addr = dct_sel_interleave_addr(pvt); + u8 node_id = dram_dst_node(pvt, range); + u8 intlv_en = dram_intlv_en(pvt, range); + + amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg); + amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg); + + dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0)); + dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7); + + edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", + range, sys_addr, get_dram_limit(pvt, range)); + + if (!(get_dram_base(pvt, range) <= sys_addr) && + !(get_dram_limit(pvt, range) >= sys_addr)) + return -EINVAL; + + if (dhar_valid(pvt) && + dhar_base(pvt) <= sys_addr && + sys_addr < BIT_64(32)) { + amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", + sys_addr); + return -EINVAL; + } + + /* Verify sys_addr is within DCT Range. */ + dct_base = (dct_sel_baseaddr(pvt) << 27); + dct_limit = (((dct_cont_limit_reg >> 11) & 0x1FFF) << 27) | 0x7FFFFFF; + + if (!(dct_cont_base_reg & BIT(0)) && + !(dct_base <= sys_addr && dct_limit >= sys_addr)) + return -EINVAL; + + /* Verify number of dct's that participate in channel interleaving. 
*/ + num_dcts_intlv = (int) hweight8(intlv_en); + + if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4)) + return -EINVAL; + + channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en, + num_dcts_intlv, dct_sel); + + /* Verify we stay within the MAX number of channels allowed */ + if (channel > 4 || channel < 0) + return -EINVAL; + + leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0)); + + /* Get normalized DCT addr */ + if (leg_mmio_hole && (sys_addr >= BIT_64(32))) + chan_offset = dhar_offset; + else + chan_offset = dct_base; + + chan_addr = sys_addr - chan_offset; + + /* remove channel interleave */ + if (num_dcts_intlv == 2) { + if (intlv_addr == 0x4) + chan_addr = ((chan_addr >> 9) << 8) | + (chan_addr & 0xff); + else if (intlv_addr == 0x5) + chan_addr = ((chan_addr >> 10) << 9) | + (chan_addr & 0x1ff); + else + return -EINVAL; + + } else if (num_dcts_intlv == 4) { + if (intlv_addr == 0x4) + chan_addr = ((chan_addr >> 10) << 8) | + (chan_addr & 0xff); + else if (intlv_addr == 0x5) + chan_addr = ((chan_addr >> 11) << 9) | + (chan_addr & 0x1ff); + else + return -EINVAL; + } + + if (dct_offset_en) { + amd64_read_pci_cfg(pvt->F1, + DRAM_CONT_HIGH_OFF + (int) channel * 4, + &tmp); + chan_addr += ((tmp >> 11) & 0xfff) << 27; + } + + f15h_select_dct(pvt, channel); + + edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr); + + /* + * Find Chip select: + * if channel = 3, then alias it to 1. This is because, in F15 M30h, + * there is support for 4 DCT's, but only 2 are currently functional. + * They are DCT0 and DCT3. But we have read all registers of DCT3 into + * pvt->csels[1]. So we need to use '1' here to get correct info. + * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications. + */ + alias_channel = (channel == 3) ? 1 : channel; + + cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel); + + if (cs_found >= 0) + *chan_sel = alias_channel; + + return cs_found; +} + +static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, + u64 sys_addr, + int *chan_sel) { int cs_found = -EINVAL; unsigned range; for (range = 0; range < DRAM_RANGES; range++) { - if (!dram_rw(pvt, range)) continue; - if ((get_dram_base(pvt, range) <= sys_addr) && - (get_dram_limit(pvt, range) >= sys_addr)) { + if (pvt->fam == 0x15 && pvt->model >= 0x30) + cs_found = f15_m30h_match_to_this_node(pvt, range, + sys_addr, + chan_sel); + else if ((get_dram_base(pvt, range) <= sys_addr) && + (get_dram_limit(pvt, range) >= sys_addr)) { cs_found = f1x_match_to_this_node(pvt, range, sys_addr, chan_sel); if (cs_found >= 0) @@ -1554,7 +1705,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; u32 dbam = ctrl ? 
pvt->dbam1 : pvt->dbam0; - if (boot_cpu_data.x86 == 0xf) { + if (pvt->fam == 0xf) { /* K8 families < revF not supported yet */ if (pvt->ext_model < K8_REV_F) return; @@ -1624,6 +1775,17 @@ static struct amd64_family_type amd64_family_types[] = { .read_dct_pci_cfg = f15_read_dct_pci_cfg, } }, + [F15_M30H_CPUS] = { + .ctl_name = "F15h_M30h", + .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1, + .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3, + .ops = { + .early_channel_count = f1x_early_channel_count, + .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, + .dbam_to_cs = f16_dbam_to_chip_select, + .read_dct_pci_cfg = f15_read_dct_pci_cfg, + } + }, [F16_CPUS] = { .ctl_name = "F16h", .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1, @@ -1860,7 +2022,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, memset(&err, 0, sizeof(err)); - sys_addr = get_error_address(m); + sys_addr = get_error_address(pvt, m); if (ecc_type == 2) err.syndrome = extract_syndrome(m->status); @@ -1921,10 +2083,9 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt) */ static void read_mc_regs(struct amd64_pvt *pvt) { - struct cpuinfo_x86 *c = &boot_cpu_data; + unsigned range; u64 msr_val; u32 tmp; - unsigned range; /* * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since @@ -1985,14 +2146,14 @@ static void read_mc_regs(struct amd64_pvt *pvt) pvt->ecc_sym_sz = 4; - if (c->x86 >= 0x10) { + if (pvt->fam >= 0x10) { amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); - if (c->x86 != 0x16) + if (pvt->fam != 0x16) /* F16h has only DCT0 */ amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); /* F10h, revD and later can do x8 ECC too */ - if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) + if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25)) pvt->ecc_sym_sz = 8; } dump_misc_regs(pvt); @@ -2086,7 +2247,7 @@ static int init_csrows(struct mem_ctl_info *mci) bool row_dct0 = !!csrow_enabled(i, 0, pvt); bool row_dct1 = false; - if (boot_cpu_data.x86 != 0xf) + if (pvt->fam != 0xf) row_dct1 = !!csrow_enabled(i, 1, pvt); if (!row_dct0 && !row_dct1) @@ -2104,7 +2265,7 @@ static int init_csrows(struct mem_ctl_info *mci) } /* K8 has only one DCT */ - if (boot_cpu_data.x86 != 0xf && row_dct1) { + if (pvt->fam != 0xf && row_dct1) { int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); csrow->channels[1]->dimm->nr_pages = row_dct1_pages; @@ -2333,13 +2494,14 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid) static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) { + struct amd64_pvt *pvt = mci->pvt_info; int rc; rc = amd64_create_sysfs_dbg_files(mci); if (rc < 0) return rc; - if (boot_cpu_data.x86 >= 0x10) { + if (pvt->fam >= 0x10) { rc = amd64_create_sysfs_inject_files(mci); if (rc < 0) return rc; @@ -2350,9 +2512,11 @@ static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) static void del_mc_sysfs_attrs(struct mem_ctl_info *mci) { + struct amd64_pvt *pvt = mci->pvt_info; + amd64_remove_sysfs_dbg_files(mci); - if (boot_cpu_data.x86 >= 0x10) + if (pvt->fam >= 0x10) amd64_remove_sysfs_inject_files(mci); } @@ -2387,10 +2551,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, */ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) { - u8 fam = boot_cpu_data.x86; struct amd64_family_type *fam_type = NULL; - switch (fam) { + pvt->ext_model = boot_cpu_data.x86_model >> 4; + pvt->stepping = boot_cpu_data.x86_mask; + pvt->model = boot_cpu_data.x86_model; + pvt->fam = boot_cpu_data.x86; + + switch (pvt->fam) { case 0xf: fam_type = &amd64_family_types[K8_CPUS]; pvt->ops = 
&amd64_family_types[K8_CPUS].ops; @@ -2402,6 +2570,12 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) break; case 0x15: + if (pvt->model == 0x30) { + fam_type = &amd64_family_types[F15_M30H_CPUS]; + pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops; + break; + } + fam_type = &amd64_family_types[F15_CPUS]; pvt->ops = &amd64_family_types[F15_CPUS].ops; break; @@ -2416,10 +2590,8 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) return NULL; } - pvt->ext_model = boot_cpu_data.x86_model >> 4; - amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, - (fam == 0xf ? + (pvt->fam == 0xf ? (pvt->ext_model >= K8_REV_F ? "revF or later " : "revE or earlier ") : ""), pvt->mc_node_id); @@ -2579,6 +2751,8 @@ static void amd64_remove_one_instance(struct pci_dev *pdev) struct ecc_settings *s = ecc_stngs[nid]; mci = find_mci_by_dev(&pdev->dev); + WARN_ON(!mci); + del_mc_sysfs_attrs(mci); /* Remove from EDAC CORE tracking list */ mci = edac_mc_del_mc(&pdev->dev); @@ -2638,6 +2812,14 @@ static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { }, { .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0, + .class_mask = 0, + }, + { + .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_16H_NB_F2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 2c6f113bae2b..d2443cfa0698 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -170,6 +170,8 @@ /* * PCI-defined configuration space registers */ +#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b +#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 #define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531 @@ -181,13 +183,22 @@ #define DRAM_BASE_LO 0x40 #define DRAM_LIMIT_LO 0x44 -#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7)) +/* + * F15 M30h D18F1x2[1C:00] + */ +#define DRAM_CONT_BASE 0x200 +#define DRAM_CONT_LIMIT 0x204 + +/* + * F15 M30h D18F1x2[4C:40] + */ +#define DRAM_CONT_HIGH_OFF 0x240 + #define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3)) #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7)) #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7)) #define DHAR 0xf0 -#define dhar_valid(pvt) ((pvt)->dhar & BIT(0)) #define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1)) #define dhar_base(pvt) ((pvt)->dhar & 0xff000000) #define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16) @@ -234,8 +245,6 @@ #define DDR3_MODE BIT(8) #define DCT_SEL_LO 0x110 -#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800) -#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3) #define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0)) #define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2)) @@ -297,6 +306,7 @@ enum amd_families { K8_CPUS = 0, F10_CPUS, F15_CPUS, + F15_M30H_CPUS, F16_CPUS, NUM_FAMILIES, }; @@ -337,6 +347,10 @@ struct amd64_pvt { struct pci_dev *F1, *F2, *F3; u16 mc_node_id; /* MC index of this MC node */ + u8 fam; /* CPU family */ + u8 model; /* ... model */ + u8 stepping; /* ... 
stepping */ + int ext_model; /* extended model value of this node */ int channel_count; @@ -414,6 +428,14 @@ static inline u16 extract_syndrome(u64 status) return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00); } +static inline u8 dct_sel_interleave_addr(struct amd64_pvt *pvt) +{ + if (pvt->fam == 0x15 && pvt->model >= 0x30) + return (((pvt->dct_sel_hi >> 9) & 0x1) << 2) | + ((pvt->dct_sel_lo >> 6) & 0x3); + + return ((pvt)->dct_sel_lo >> 6) & 0x3; +} /* * per-node ECC settings descriptor */ @@ -504,3 +526,33 @@ static inline void enable_caches(void *dummy) { write_cr0(read_cr0() & ~X86_CR0_CD); } + +static inline u8 dram_intlv_en(struct amd64_pvt *pvt, unsigned int i) +{ + if (pvt->fam == 0x15 && pvt->model >= 0x30) { + u32 tmp; + amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &tmp); + return (u8) tmp & 0xF; + } + return (u8) (pvt->ranges[i].base.lo >> 8) & 0x7; +} + +static inline u8 dhar_valid(struct amd64_pvt *pvt) +{ + if (pvt->fam == 0x15 && pvt->model >= 0x30) { + u32 tmp; + amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp); + return (tmp >> 1) & BIT(0); + } + return (pvt)->dhar & BIT(0); +} + +static inline u32 dct_sel_baseaddr(struct amd64_pvt *pvt) +{ + if (pvt->fam == 0x15 && pvt->model >= 0x30) { + u32 tmp; + amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp); + return (tmp >> 11) & 0x1FFF; + } + return (pvt)->dct_sel_lo & 0xFFFFF800; +} diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 98d670825a1a..6e8887fe6c1b 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align, astbo->gem.driver_private = NULL; astbo->bo.bdev = &ast->ttm.bdev; + astbo->bo.bdev->dev_mapping = dev->dev_mapping; ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 0047012045c2..69fd8f1ac8df 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, cirrusbo->gem.driver_private = NULL; cirrusbo->bo.bdev = &cirrus->ttm.bdev; + cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping; cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 8bcce7866d36..f92da0a32f0d 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. */ - etime = ktime_sub_ns(etime, delta_ns); + if (delta_ns < 0) + etime = ktime_add_ns(etime, -delta_ns); + else + etime = ktime_sub_ns(etime, delta_ns); *vblank_time = ktime_to_timeval(etime); DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f2326fc60ac9..6f514297c483 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1856,10 +1856,16 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) -/* HDMI/DP bits are gen4+ */ -#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29) +/* + * HDMI/DP bits are gen4+ + * + * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. 
+ * Please check the detailed lore in the commit message for for experimental + * evidence. + */ +#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29) #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) -#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27) +#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27) #define PORTD_HOTPLUG_INT_STATUS (3 << 21) #define PORTC_HOTPLUG_INT_STATUS (3 << 19) #define PORTB_HOTPLUG_INT_STATUS (3 << 17) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5fb305840db8..e38b45786653 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8269,9 +8269,11 @@ check_crtc_state(struct drm_device *dev) list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + enum pipe pipe; if (encoder->base.crtc != &crtc->base) continue; - if (encoder->get_config) + if (encoder->get_config && + encoder->get_hw_state(encoder, &pipe)) encoder->get_config(encoder, &pipe_config); } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 67e2c1f1c9a8..5950888ae1d0 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -497,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) goto out; } - /* scale to hardware */ - level = level * freq / max; + /* scale to hardware, but be careful to not overflow */ + if (freq < max) + level = level * freq / max; + else + level = freq / max * level; dev_priv->backlight.level = level; if (dev_priv->backlight.device) @@ -515,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; + /* + * Do not disable backlight on the vgaswitcheroo path. When switching + * away from i915, the other client may depend on i915 to handle the + * backlight. This will leave the backlight on unnecessarily when + * another client is not activated. + */ + if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { + DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); + return; + } + spin_lock_irqsave(&dev_priv->backlight.lock, flags); dev_priv->backlight.enabled = false; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f895d1508df8..b0e4a0bd1313 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) } } else { if (enable_requested) { + unsigned long irqflags; + enum pipe p; + I915_WRITE(HSW_PWR_WELL_DRIVER, 0); + POSTING_READ(HSW_PWR_WELL_DRIVER); DRM_DEBUG_KMS("Requesting to disable the power well\n"); + + /* + * After this, the registers on the pipes that are part + * of the power well will become zero, so we have to + * adjust our counters according to that. + * + * FIXME: Should we do this in general in + * drm_vblank_post_modeset? 
+ */ + spin_lock_irqsave(&dev->vbl_lock, irqflags); + for_each_pipe(p) + if (p != PIPE_A) + dev->last_vblank[p] = 0; + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } } } diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 13878d5de063..d70e4a92773b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, mgabo->gem.driver_private = NULL; mgabo->bo.bdev = &mdev->ttm.bdev; + mgabo->bo.bdev->dev_mapping = dev->dev_mapping; mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0bfd55e08820..9953e1fbc46d 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dacec4e2090..8928bd109c16 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2587,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, if (rdev->wb.enabled) { rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); } else { + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); rptr = RREG32(CP_HQD_PQ_RPTR); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); } rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; @@ -2604,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, if (rdev->wb.enabled) { wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); } else { + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); wptr = RREG32(CP_HQD_PQ_WPTR); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); } wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; @@ -2897,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_CPF_DEBUG, tmp); /* init the pipes */ + mutex_lock(&rdev->srbm_mutex); for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { int me = (i < 4) ? 1 : 2; int pipe = (i < 4) ? i : (i - 4); @@ -2919,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_HPD_EOP_CONTROL, tmp); } cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); /* init the queues. Just two for now. 
*/ for (i = 0; i < 2; i++) { @@ -2972,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) mqd->static_thread_mgmt23[0] = 0xffffffff; mqd->static_thread_mgmt23[1] = 0xffffffff; + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, rdev->ring[idx].me, rdev->ring[idx].pipe, rdev->ring[idx].queue, 0); @@ -3099,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); radeon_bo_kunmap(rdev->ring[idx].mqd_obj); radeon_bo_unreserve(rdev->ring[idx].mqd_obj); @@ -4320,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ + mutex_lock(&rdev->srbm_mutex); for (i = 0; i < 16; i++) { cik_srbm_select(rdev, 0, 0, 0, i); /* CP and shaders */ @@ -4335,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* XXX SDMA RLC - todo */ } cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); cik_pcie_gart_tlb_flush(rdev); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", @@ -5954,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev) struct radeon_ring *ring; int r; + cik_mc_program(rdev); + if (rdev->flags & RADEON_IS_IGP) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { @@ -5985,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev) if (r) return r; - cik_mc_program(rdev); r = cik_pcie_gart_enable(rdev); if (r) return r; @@ -6194,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cik_cp_enable(rdev, false); cik_sdma_enable(rdev, false); - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); cik_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -6358,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); cik_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); @@ -6978,7 +6990,7 @@ int cik_uvd_resume(struct radeon_device *rdev) /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; WREG32(UVD_VCPU_CACHE_OFFSET0, addr); WREG32(UVD_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 9bcdd174780f..7e5d0b570a30 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - uint16_t data_offset, size; - uint8_t frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -2122,8 +2110,7 @@ int 
cypress_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 038dcac7670c..d5b49e33315e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5106,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + evergreen_mc_program(rdev); + if (ASIC_IS_DCE5(rdev)) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { r = ni_init_microcode(rdev); @@ -5133,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev) if (r) return r; - evergreen_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { evergreen_agp_enable(rdev); } else { @@ -5291,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev) int evergreen_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); r700_cp_stop(rdev); r600_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); @@ -5429,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index bb9ea3641312..b0e280058b9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); u32 base_rate = 24000; + u32 max_ratio = clock / base_rate; + u32 dto_phase; + u32 dto_modulo = clock; + u32 wallclock_ratio; + u32 dto_cntl; if (!dig || !dig->afmt) return; + if (max_ratio >= 8) { + dto_phase = 192 * 1000; + wallclock_ratio = 3; + } else if (max_ratio >= 4) { + dto_phase = 96 * 1000; + wallclock_ratio = 2; + } else if (max_ratio >= 2) { + dto_phase = 48 * 1000; + wallclock_ratio = 1; + } else { + dto_phase = 24 * 1000; + wallclock_ratio = 0; + } + dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); + /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. 
DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); - WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); + WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); } diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a7baf67aef6c..0d582ac1dc31 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -497,6 +497,9 @@ #define DCCG_AUDIO_DTO0_MODULE 0x05b4 #define DCCG_AUDIO_DTO0_LOAD 0x05b8 #define DCCG_AUDIO_DTO0_CNTL 0x05bc +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 #define DCCG_AUDIO_DTO1_PHASE 0x05c0 #define DCCG_AUDIO_DTO1_MODULE 0x05c4 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 56bd4f3be4fe..ccb4f8b54852 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -794,9 +794,13 @@ int ni_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "ni_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); @@ -2079,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + evergreen_mc_program(rdev); + if (rdev->flags & RADEON_IS_IGP) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = ni_init_microcode(rdev); @@ -2107,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev) if (r) return r; - evergreen_mc_program(rdev); r = cayman_pcie_gart_enable(rdev); if (r) return r; @@ -2286,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -2418,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); cayman_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 4f9b9bc20daa..f0f5f748938a 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -4067,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev) struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; struct ni_power_info *ni_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; @@ -4162,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = 
true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -4188,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 10f712e37003..e66e72077350 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2299,9 +2299,13 @@ int r600_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "smc: Bogus length %zu in firmware \"%s\"\n", rdev->smc_fw->size, fw_name); @@ -2697,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) return 0; } -void r600_uvd_rbc_stop(struct radeon_device *rdev) +void r600_uvd_stop(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; /* force RBC into idle state */ WREG32(UVD_RBC_RB_CNTL, 0x11010101); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); + mdelay(1); + + /* put VCPU into reset */ + WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); + mdelay(5); + + /* disable VCPU clock */ + WREG32(UVD_VCPU_CNTL, 0x0); + + /* Unstall UMC and register bus */ + WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); + ring->ready = false; } @@ -2722,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev) /* disable interupt */ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); + mdelay(1); + /* put LMI, VCPU, RBC etc... 
into reset */ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | @@ -2751,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev) WREG32(UVD_MPC_SET_ALU, 0); WREG32(UVD_MPC_SET_MUX, 0x88); - /* Stall UMC */ - WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); - /* take all subblocks out of reset, except VCPU */ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); mdelay(5); @@ -3312,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev) /* enable pcie gen2 link */ r600_pcie_gen2_enable(rdev); + r600_mc_program(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = r600_init_microcode(rdev); if (r) { @@ -3324,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev) if (r) return r; - r600_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { r600_agp_enable(rdev); } else { diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f48240bb8c56..f264df5470f7 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; u32 base_rate = 24000; + u32 max_ratio = clock / base_rate; + u32 dto_phase; + u32 dto_modulo = clock; + u32 wallclock_ratio; + u32 dto_cntl; if (!dig || !dig->afmt) return; + if (max_ratio >= 8) { + dto_phase = 192 * 1000; + wallclock_ratio = 3; + } else if (max_ratio >= 4) { + dto_phase = 96 * 1000; + wallclock_ratio = 2; + } else if (max_ratio >= 2) { + dto_phase = 48 * 1000; + wallclock_ratio = 1; + } else { + dto_phase = 24 * 1000; + wallclock_ratio = 0; + } + /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. * doesn't matter which one you use. Just use the first one. */ @@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* according to the reg specs, this should DCE3.2 only, but in * practice it seems to cover DCE3.0 as well. 
*/ - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); - WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); - WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ + if (dig->dig_encoder == 0) { + dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); + WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); + WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ + } else { + dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); + WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); + WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); + WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ + } } else { /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 8e3fe815edab..7c780839a7f4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -933,6 +933,9 @@ #define DCCG_AUDIO_DTO0_LOAD 0x051c # define DTO_LOAD (1 << 31) #define DCCG_AUDIO_DTO0_CNTL 0x0520 +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 #define DCCG_AUDIO_DTO1_PHASE 0x0524 #define DCCG_AUDIO_DTO1_MODULE 0x0528 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f08219c39b6..274b8e1b889f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1468,7 +1468,6 @@ struct radeon_uvd { void *cpu_addr; uint64_t gpu_addr; void *saved_bo; - unsigned fw_size; atomic_t handles[RADEON_MAX_UVD_HANDLES]; struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; struct delayed_work idle_work; @@ -2066,6 +2065,7 @@ struct radeon_device { const struct firmware *mec_fw; /* CIK MEC firmware */ const struct firmware *sdma_fw; /* CIK SDMA firmware */ const struct firmware *smc_fw; /* SMC firmware */ + const struct firmware *uvd_fw; /* UVD firmware */ struct r600_blit r600_blit; struct r600_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ @@ -2095,6 +2095,8 @@ struct radeon_device { /* ACPI interface */ struct radeon_atif atif; struct radeon_atcs atcs; + /* srbm instance registers */ + struct mutex srbm_mutex; }; int radeon_device_init(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 902479fa737f..3d61d5aac18f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -441,7 +441,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde /* uvd */ int r600_uvd_init(struct radeon_device *rdev); int r600_uvd_rbc_start(struct radeon_device *rdev); -void r600_uvd_rbc_stop(struct radeon_device *rdev); +void r600_uvd_stop(struct radeon_device *rdev); int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_uvd_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 82335e38ec4f..63398ae1dbf5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev, 
mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); mutex_init(&rdev->gpu_clock_mutex); + mutex_init(&rdev->srbm_mutex); init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); @@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); + radeon_pm_suspend(rdev); radeon_suspend(rdev); for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -1564,6 +1566,7 @@ retry: } } + radeon_pm_resume(rdev); drm_helper_resume_force_mode(rdev->ddev); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ddb0efe2408..ddb8f8e04eb5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) } else { /* put fence directly behind firmware */ - index = ALIGN(rdev->uvd.fw_size, 8); + index = ALIGN(rdev->uvd_fw->size, 8); rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; } diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 6a51d943ccf4..b990b1a2bd50 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) if (rdev->gart.robj == NULL) { return; } - radeon_gart_table_vram_unpin(rdev); radeon_bo_unref(&rdev->gart.robj); } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f374c467aaca..c557850cd345 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_VERDE: case CHIP_OLAND: case CHIP_HAINAN: - if (radeon_dpm == 1) + /* DPM requires the RLC, RV770+ dGPU requires SMC */ + if (!rdev->rlc_fw) + rdev->pm.pm_method = PM_METHOD_PROFILE; + else if ((rdev->family >= CHIP_RV770) && + (!(rdev->flags & RADEON_IS_IGP)) && + (!rdev->smc_fw)) + rdev->pm.pm_method = PM_METHOD_PROFILE; + else if (radeon_dpm == 1) rdev->pm.pm_method = PM_METHOD_DPM; else rdev->pm.pm_method = PM_METHOD_PROFILE; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 414fd145d20e..f1c15754e73c 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work); int radeon_uvd_init(struct radeon_device *rdev) { - const struct firmware *fw; unsigned long bo_size; const char *fw_name; int i, r; @@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev) return -EINVAL; } - r = request_firmware(&fw, fw_name, rdev->dev); + r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); if (r) { dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", fw_name); return r; } - bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) + + bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); @@ -145,12 +144,6 @@ int radeon_uvd_init(struct radeon_device *rdev) radeon_bo_unreserve(rdev->uvd.vcpu_bo); - rdev->uvd.fw_size = fw->size; - memset(rdev->uvd.cpu_addr, 0, bo_size); - 
memcpy(rdev->uvd.cpu_addr, fw->data, fw->size); - - release_firmware(fw); - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { atomic_set(&rdev->uvd.handles[i], 0); rdev->uvd.filp[i] = NULL; @@ -174,33 +167,60 @@ void radeon_uvd_fini(struct radeon_device *rdev) } radeon_bo_unref(&rdev->uvd.vcpu_bo); + + release_firmware(rdev->uvd_fw); } int radeon_uvd_suspend(struct radeon_device *rdev) { unsigned size; + void *ptr; + int i; if (rdev->uvd.vcpu_bo == NULL) return 0; + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) + if (atomic_read(&rdev->uvd.handles[i])) + break; + + if (i == RADEON_MAX_UVD_HANDLES) + return 0; + size = radeon_bo_size(rdev->uvd.vcpu_bo); + size -= rdev->uvd_fw->size; + + ptr = rdev->uvd.cpu_addr; + ptr += rdev->uvd_fw->size; + rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); - memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size); + memcpy(rdev->uvd.saved_bo, ptr, size); return 0; } int radeon_uvd_resume(struct radeon_device *rdev) { + unsigned size; + void *ptr; + if (rdev->uvd.vcpu_bo == NULL) return -EINVAL; + memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); + + size = radeon_bo_size(rdev->uvd.vcpu_bo); + size -= rdev->uvd_fw->size; + + ptr = rdev->uvd.cpu_addr; + ptr += rdev->uvd_fw->size; + if (rdev->uvd.saved_bo != NULL) { - unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo); - memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size); + memcpy(ptr, rdev->uvd.saved_bo, size); kfree(rdev->uvd.saved_bo); rdev->uvd.saved_bo = NULL; - } + } else + memset(ptr, 0, size); return 0; } @@ -215,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) { int i, r; for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (rdev->uvd.filp[i] == filp) { - uint32_t handle = atomic_read(&rdev->uvd.handles[i]); + uint32_t handle = atomic_read(&rdev->uvd.handles[i]); + if (handle != 0 && rdev->uvd.filp[i] == filp) { struct radeon_fence *fence; r = radeon_uvd_get_destroy_msg(rdev, @@ -337,8 +357,10 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, } r = radeon_bo_kmap(bo, &ptr); - if (r) + if (r) { + DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); return r; + } msg = ptr + offset; @@ -364,8 +386,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, radeon_bo_kunmap(bo); return 0; } else { - /* it's a create msg, no special handling needed */ radeon_bo_kunmap(bo); + + if (msg_type != 0) { + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; + } + + /* it's a create msg, no special handling needed */ } /* create or decode, validate the handle */ @@ -388,7 +416,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, int data0, int data1, - unsigned buf_sizes[]) + unsigned buf_sizes[], bool *has_msg_cmd) { struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_reloc *reloc; @@ -417,7 +445,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, if (cmd < 0x4) { if ((end - start) < buf_sizes[cmd]) { - DRM_ERROR("buffer to small (%d / %d)!\n", + DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, (unsigned)(end - start), buf_sizes[cmd]); return -EINVAL; } @@ -442,9 +470,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, } if (cmd == 0) { + if (*has_msg_cmd) { + DRM_ERROR("More than one message in a UVD-IB!\n"); + return -EINVAL; + } + *has_msg_cmd = true; r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); if (r) return r; + } else if (!*has_msg_cmd) { + 
DRM_ERROR("Message needed before other commands are send!\n"); + return -EINVAL; } return 0; @@ -453,7 +489,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, int *data0, int *data1, - unsigned buf_sizes[]) + unsigned buf_sizes[], + bool *has_msg_cmd) { int i, r; @@ -467,7 +504,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, *data1 = p->idx; break; case UVD_GPCOM_VCPU_CMD: - r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes); + r = radeon_uvd_cs_reloc(p, *data0, *data1, + buf_sizes, has_msg_cmd); if (r) return r; break; @@ -488,6 +526,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) struct radeon_cs_packet pkt; int r, data0 = 0, data1 = 0; + /* does the IB has a msg command */ + bool has_msg_cmd = false; + /* minimum buffer sizes */ unsigned buf_sizes[] = { [0x00000000] = 2048, @@ -514,8 +555,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) return r; switch (pkt.type) { case RADEON_PACKET_TYPE0: - r = radeon_uvd_cs_reg(p, &pkt, &data0, - &data1, buf_sizes); + r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1, + buf_sizes, &has_msg_cmd); if (r) return r; break; @@ -527,6 +568,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + + if (!has_msg_cmd) { + DRM_ERROR("UVD-IBs need a msg command!\n"); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 363018c60412..bdd888b4db2b 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -1944,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) int rv6xx_dpm_init(struct radeon_device *rdev) { - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - uint16_t data_offset, size; - uint8_t frev, crev; + struct radeon_atom_ss ss; struct atom_clock_dividers dividers; struct rv6xx_power_info *pi; int ret; @@ -1989,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev) pi->gfx_clock_gating = true; - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; + pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_ENGINE_SS, 0); + pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_MEMORY_SS, 0); + + /* Disable sclk ss, causes hangs on a lot of systems */ + pi->sclk_ss = false; + + if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; + else pi->dynamic_ss = false; - } pi->dynamic_pcie_gen2 = true; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 30ea14e8854c..bcc68ec204ad 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev) /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; WREG32(UVD_VCPU_CACHE_OFFSET0, addr); WREG32(UVD_VCPU_CACHE_SIZE0, size); @@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev) /* enable pcie gen2 link */ rv770_pcie_gen2_enable(rdev); + rv770_mc_program(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = r600_init_microcode(rdev); if (r) { @@ -1841,7 +1843,6 @@ static int 
rv770_startup(struct radeon_device *rdev) if (r) return r; - rv770_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { rv770_agp_enable(rdev); } else { @@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev) int rv770_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); r700_cp_stop(rdev); r600_dma_stop(rdev); @@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 2d347925f77d..094c67a29d0d 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2319,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) return 0; } +void rv770_get_engine_memory_ss(struct radeon_device *rdev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(rdev); + struct radeon_atom_ss ss; + + pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_ENGINE_SS, 0); + pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_MEMORY_SS, 0); + + if (pi->sclk_ss || pi->mclk_ss) + pi->dynamic_ss = true; + else + pi->dynamic_ss = false; +} + int rv770_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - uint16_t data_offset, size; - uint8_t frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2369,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev) pi->mvdd_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = false; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = RV770_HASI_DFLT; @@ -2393,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 96b1b2a62a8a..9244effc6b59 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h @@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, struct radeon_ps *new_ps, struct radeon_ps *old_ps); +void rv770_get_engine_memory_ss(struct radeon_device *rdev); /* smc */ int rv770_read_smc_soft_register(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 6ca904673a4f..daa8d2df8ec5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1663,9 +1663,13 @@ static int si_init_microcode(struct radeon_device *rdev) snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw 
= NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "si_smc: Bogus length %zu in firmware \"%s\"\n", rdev->smc_fw->size, fw_name); @@ -6418,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev) /* enable aspm */ si_program_aspm(rdev); + si_mc_program(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || !rdev->rlc_fw || !rdev->mc_fw) { r = si_init_microcode(rdev); @@ -6437,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev) if (r) return r; - si_mc_program(rdev); r = si_pcie_gart_enable(rdev); if (r) return r; @@ -6621,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev) si_cp_enable(rdev, false); cayman_dma_stop(rdev); if (rdev->has_uvd) { - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); } si_irq_suspend(rdev); @@ -6763,8 +6768,10 @@ void si_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - if (rdev->has_uvd) + if (rdev->has_uvd) { + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); + } si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 41825575b403..88699e3cd868 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2903,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, { struct ni_ps *ps = ni_get_ps(rps); struct radeon_clock_and_voltage_limits *max_limits; - bool disable_mclk_switching; + bool disable_mclk_switching = false; + bool disable_sclk_switching = false; u32 mclk, sclk; u16 vddc, vddci; int i; @@ -2911,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if ((rdev->pm.dpm.new_active_crtc_count > 1) || ni_dpm_vblank_too_short(rdev)) disable_mclk_switching = true; - else - disable_mclk_switching = false; + + if (rps->vclk || rps->dclk) { + disable_mclk_switching = true; + disable_sclk_switching = true; + } if (rdev->pm.dpm.ac_power) max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; @@ -2940,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if (disable_mclk_switching) { mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; - sclk = ps->performance_levels[0].sclk; - vddc = ps->performance_levels[0].vddc; vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; } else { - sclk = ps->performance_levels[0].sclk; mclk = ps->performance_levels[0].mclk; - vddc = ps->performance_levels[0].vddc; vddci = ps->performance_levels[0].vddci; } + if (disable_sclk_switching) { + sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; + vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; + } else { + sclk = ps->performance_levels[0].sclk; + vddc = ps->performance_levels[0].vddc; + } + /* adjusted low state */ ps->performance_levels[0].sclk = sclk; ps->performance_levels[0].mclk = mclk; ps->performance_levels[0].vddc = vddc; ps->performance_levels[0].vddci = vddci; - for (i = 1; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) - ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; - if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) - ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; + if (disable_sclk_switching) { + sclk = ps->performance_levels[0].sclk; + for (i = 1; i < ps->performance_level_count; i++) { + if (sclk < 
ps->performance_levels[i].sclk) + sclk = ps->performance_levels[i].sclk; + } + for (i = 0; i < ps->performance_level_count; i++) { + ps->performance_levels[i].sclk = sclk; + ps->performance_levels[i].vddc = vddc; + } + } else { + for (i = 1; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) + ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; + if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) + ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; + } } if (disable_mclk_switching) { @@ -6253,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev) struct evergreen_power_info *eg_pi; struct ni_power_info *ni_pi; struct si_power_info *si_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; u32 mask; @@ -6346,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev) si_pi->vddc_phase_shed_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -6366,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev) eg_pi->sclk_deep_sleep = true; si_pi->sclk_deep_sleep_above_low = false; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 7a5764843bfb..cd33084c7860 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -488,8 +488,6 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) if (djrcv_dev->querying_devices) return 0; - djrcv_dev->querying_devices = true; - dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 0f34bca9f5e5..6099f50b28aa 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg, u16 value) { return i2c_smbus_write_byte_data(client, reg, value & 0xFF) - && i2c_smbus_write_byte_data(client, reg + 1, value >> 8); + || i2c_smbus_write_byte_data(client, reg + 1, value >> 8); } static void adt7470_init_client(struct i2c_client *client) diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c index ccec916bc3eb..af8f65fb1c05 100644 --- a/drivers/i2c/busses/i2c-kempld.c +++ b/drivers/i2c/busses/i2c-kempld.c @@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c) bus_frequency = KEMPLD_I2C_FREQ_MAX; if (pld->info.spec_major == 1) - prescale = pld->pld_clock / bus_frequency * 5 - 1000; + prescale = pld->pld_clock / (bus_frequency * 5) - 1000; else - prescale = pld->pld_clock / bus_frequency * 4 - 3000; + prescale = pld->pld_clock / (bus_frequency * 4) - 3000; if (prescale < 0) prescale = 0; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index df8ff5aea5b5..e2e9a0dade96 100644 --- a/drivers/i2c/busses/i2c-mxs.c 
+++ b/drivers/i2c/busses/i2c-mxs.c @@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, * based on this empirical measurement and a lot of previous frobbing. */ i2c->cmd_err = 0; - if (msg->len < 8) { + if (0) { /* disable PIO mode until a proper fix is made */ ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); if (ret) mxs_i2c_reset(i2c); diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 0ad208a69c29..3ceac3e91dde 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) { unsigned int stepconfig; int i, steps; - u32 step_en; /* * There are 16 configurable steps and 8 analog input @@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) adc_dev->channel_step[i] = steps; steps++; } - step_en = get_adc_step_mask(adc_dev); - am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); + } static const char * const chan_name_ain[] = { @@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct tiadc_device *adc_dev = iio_priv(indio_dev); - int i; - unsigned int fifo1count, read; + int i, map_val; + unsigned int fifo1count, read, stepid; u32 step = UINT_MAX; bool found = false; + u32 step_en; + unsigned long timeout = jiffies + usecs_to_jiffies + (IDLE_TIMEOUT * adc_dev->channels); + step_en = get_adc_step_mask(adc_dev); + am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); + + /* Wait for ADC sequencer to complete sampling */ + while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) { + if (time_after(jiffies, timeout)) + return -EAGAIN; + } + map_val = chan->channel + TOTAL_CHANNELS; /* * When the sub-system is first enabled, @@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); for (i = 0; i < fifo1count; i++) { read = tiadc_readl(adc_dev, REG_FIFO1); - if (read >> 16 == step) { - *val = read & 0xfff; + stepid = read & FIFOREAD_CHNLID_MASK; + stepid = stepid >> 0x10; + + if (stepid == map_val) { + read = read & FIFOREAD_DATA_MASK; found = true; + *val = read; } } - am335x_tsc_se_update(adc_dev->mfd_tscadc); + if (found == false) return -EBUSY; return IIO_VAL_INT; diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index ea8a4146620d..0dd9bb873130 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name, void iio_trigger_poll(struct iio_trigger *trig, s64 time) { int i; - if (!trig->use_count) - for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) - if (trig->subirqs[i].enabled) { - trig->use_count++; + + if (!atomic_read(&trig->use_count)) { + atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); + + for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { + if (trig->subirqs[i].enabled) generic_handle_irq(trig->subirq_base + i); - } + else + iio_trigger_notify_done(trig); + } + } } EXPORT_SYMBOL(iio_trigger_poll); @@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll); void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) { int i; - if (!trig->use_count) - for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) - if (trig->subirqs[i].enabled) { - trig->use_count++; + + if (!atomic_read(&trig->use_count)) { + atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); + + for (i = 0; i < 
CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { + if (trig->subirqs[i].enabled) handle_nested_irq(trig->subirq_base + i); - } + else + iio_trigger_notify_done(trig); + } + } } EXPORT_SYMBOL(iio_trigger_poll_chained); void iio_trigger_notify_done(struct iio_trigger *trig) { - trig->use_count--; - if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable) + if (atomic_dec_and_test(&trig->use_count) && trig->ops && + trig->ops->try_reenable) if (trig->ops->try_reenable(trig)) /* Missed an interrupt so launch new poll now */ iio_trigger_poll(trig, 0); diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c index efdc873e58d1..a9857022f71d 100644 --- a/drivers/media/i2c/ml86v7667.c +++ b/drivers/media/i2c/ml86v7667.c @@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; + int ret = -EINVAL; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: @@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) break; } - return 0; + return ret; } static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index df4ada880e42..bd9405df1bd6 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids); #ifdef CONFIG_OF static const struct of_device_id coda_dt_ids[] = { - { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] }, + { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] }, { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, { /* sentinel */ } }; diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 553d87e5ceab..fd6289d60cde 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev) } *vfd = g2d_videodev; vfd->lock = &dev->mutex; + vfd->v4l2_dev = &dev->v4l2_dev; ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 5296385153d5..4f6dd42c9adb 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) pix_mp->num_planes = 2; /* Set pixelformat to the format in which MFC outputs the decoded frame */ - pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT; + pix_mp->pixelformat = ctx->dst_fmt->fourcc; pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; @@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) mfc_err("Unsupported format for source.\n"); return -EINVAL; } - if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { - mfc_err("Not supported format.\n"); + if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) { + mfc_err("Unknown codec\n"); return -EINVAL; } + if (!IS_MFCV6(dev)) { + if (fmt->fourcc == V4L2_PIX_FMT_VP8) { + mfc_err("Not supported format.\n"); + return -EINVAL; + } + } } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { fmt = find_format(f, MFC_FMT_RAW); if (!fmt) { @@ -411,7 +417,6 @@ static int 
vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret = 0; - struct s5p_mfc_fmt *fmt; struct v4l2_pix_format_mplane *pix_mp; mfc_debug_enter(); @@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) goto out; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - fmt = find_format(f, MFC_FMT_RAW); - if (!fmt) { - mfc_err("Unsupported format for source.\n"); - return -EINVAL; - } - if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } else if (IS_MFCV6(dev) && - (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } - ctx->dst_fmt = fmt; - mfc_debug_leave(); - return ret; - } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - mfc_err("Wrong type error for S_FMT : %d", f->type); - return -EINVAL; - } - fmt = find_format(f, MFC_FMT_DEC); - if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) { - mfc_err("Unknown codec\n"); - ret = -EINVAL; + /* dst_fmt is validated by call to vidioc_try_fmt */ + ctx->dst_fmt = find_format(f, MFC_FMT_RAW); + ret = 0; goto out; - } - if (fmt->type != MFC_FMT_DEC) { - mfc_err("Wrong format selected, you should choose " - "format for decoding\n"); + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + /* src_fmt is validated by call to vidioc_try_fmt */ + ctx->src_fmt = find_format(f, MFC_FMT_DEC); + ctx->codec_mode = ctx->src_fmt->codec_mode; + mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); + pix_mp->height = 0; + pix_mp->width = 0; + if (pix_mp->plane_fmt[0].sizeimage) + ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; + else + pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = + DEF_CPB_SIZE; + pix_mp->plane_fmt[0].bytesperline = 0; + ctx->state = MFCINST_INIT; + ret = 0; + goto out; + } else { + mfc_err("Wrong type error for S_FMT : %d", f->type); ret = -EINVAL; goto out; } - if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } - ctx->src_fmt = fmt; - ctx->codec_mode = fmt->codec_mode; - mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); - pix_mp->height = 0; - pix_mp->width = 0; - if (pix_mp->plane_fmt[0].sizeimage) - ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; - else - pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = - DEF_CPB_SIZE; - pix_mp->plane_fmt[0].bytesperline = 0; - ctx->state = MFCINST_INIT; + out: mfc_debug_leave(); return ret; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 2549967b2f85..59e56f4c8ce3 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { + struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_fmt *fmt; struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; @@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) return -EINVAL; } + if (!IS_MFCV6(dev)) { + if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) { + mfc_err("Not supported format.\n"); + return -EINVAL; + } + } else if (IS_MFCV6(dev)) { + if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) { + mfc_err("Not supported format.\n"); + return 
-EINVAL; + } + } + if (fmt->num_planes != pix_fmt_mp->num_planes) { mfc_err("failed to try output format\n"); return -EINVAL; @@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); - struct s5p_mfc_fmt *fmt; struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; int ret = 0; @@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) goto out; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - fmt = find_format(f, MFC_FMT_ENC); - if (!fmt) { - mfc_err("failed to set capture format\n"); - return -EINVAL; - } + /* dst_fmt is validated by call to vidioc_try_fmt */ + ctx->dst_fmt = find_format(f, MFC_FMT_ENC); ctx->state = MFCINST_INIT; - ctx->dst_fmt = fmt; ctx->codec_mode = ctx->dst_fmt->codec_mode; ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; pix_fmt_mp->plane_fmt[0].bytesperline = 0; @@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) } mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - fmt = find_format(f, MFC_FMT_RAW); - if (!fmt) { - mfc_err("failed to set output format\n"); - return -EINVAL; - } - - if (!IS_MFCV6(dev) && - (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } else if (IS_MFCV6(dev) && - (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } - - if (fmt->num_planes != pix_fmt_mp->num_planes) { - mfc_err("failed to set output format\n"); - ret = -EINVAL; - goto out; - } - ctx->src_fmt = fmt; + /* src_fmt is validated by call to vidioc_try_fmt */ + ctx->src_fmt = find_format(f, MFC_FMT_RAW); ctx->img_width = pix_fmt_mp->width; ctx->img_height = pix_fmt_mp->height; mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 4851cc2e4a4d..c4ff9739a7ae 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus, *eedata = data; *eedata_len = len; - dev_config = (void *)eedata; + dev_config = (void *)*eedata; switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { case 0: diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index cb694055ba7d..6e5070774dc2 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface, dev->workqueue = 0; + /* init video transfer queues first of all */ + /* to prevent oops in hdpvr_delete() on error paths */ + INIT_LIST_HEAD(&dev->free_buff_list); + INIT_LIST_HEAD(&dev->rec_buff_list); + /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); @@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface, if (!dev->workqueue) goto error; - /* init video transfer queues */ - INIT_LIST_HEAD(&dev->free_buff_list); - INIT_LIST_HEAD(&dev->rec_buff_list); - dev->options = hdpvr_default_options; if (default_video_input < HDPVR_VIDEO_INPUTS) @@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface, video_nr[atomic_inc_return(&dev_nr)]); if 
(retval < 0) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); - goto error; + goto reg_fail; } /* let the user know what node this device is now attached to */ diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig index 8864436464bf..7c5b86006ee6 100644 --- a/drivers/media/usb/usbtv/Kconfig +++ b/drivers/media/usb/usbtv/Kconfig @@ -1,6 +1,6 @@ config VIDEO_USBTV tristate "USBTV007 video capture support" - depends on VIDEO_DEV + depends on VIDEO_V4L2 select VIDEOBUF2_VMALLOC ---help--- diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c index bf43f874685e..91650173941a 100644 --- a/drivers/media/usb/usbtv/usbtv.c +++ b/drivers/media/usb/usbtv/usbtv.c @@ -57,7 +57,7 @@ #define USBTV_CHUNK_SIZE 256 #define USBTV_CHUNK 240 #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ - / 2 / USBTV_CHUNK) + / 4 / USBTV_CHUNK) /* Chunk header. */ #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ @@ -89,6 +89,7 @@ struct usbtv { /* Number of currently processed frame, useful find * out when a new one begins. */ u32 frame_id; + int chunks_done; int iso_size; unsigned int sequence; @@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv) return 0; } +/* Copy data from chunk into a frame buffer, deinterlacing the data + * into every second line. Unfortunately, they don't align nicely into + * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels. + * Therefore, we break down the chunk into two halves before copyting, + * so that we can interleave a line if needed. */ +static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd) +{ + int half; + + for (half = 0; half < 2; half++) { + int part_no = chunk_no * 2 + half; + int line = part_no / 3; + int part_index = (line * 2 + !odd) * 3 + (part_no % 3); + + u32 *dst = &frame[part_index * USBTV_CHUNK/2]; + memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src)); + src += USBTV_CHUNK/2; + } +} + /* Called for each 256-byte image chunk. * First word identifies the chunk, followed by 240 words of image * data and padding. */ @@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) frame_id = USBTV_FRAME_ID(chunk); odd = USBTV_ODD(chunk); chunk_no = USBTV_CHUNK_NO(chunk); - - /* Deinterlace. TODO: Use interlaced frame format. */ - chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3; - chunk_no += !odd * 3; - if (chunk_no >= USBTV_CHUNKS) return; /* Beginning of a frame. */ - if (chunk_no == 0) + if (chunk_no == 0) { usbtv->frame_id = frame_id; + usbtv->chunks_done = 0; + } + + if (usbtv->frame_id != frame_id) + return; spin_lock_irqsave(&usbtv->buflock, flags); if (list_empty(&usbtv->bufs)) { @@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); frame = vb2_plane_vaddr(&buf->vb, 0); - /* Copy the chunk. */ - memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1], - USBTV_CHUNK * sizeof(chunk[1])); + /* Copy the chunk data. */ + usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd); + usbtv->chunks_done++; /* Last chunk in a frame, signalling an end */ - if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) { + if (odd && chunk_no == USBTV_CHUNKS-1) { int size = vb2_plane_size(&buf->vb, 0); + enum vb2_buffer_state state = usbtv->chunks_done == + USBTV_CHUNKS ? 
+ VB2_BUF_STATE_DONE : + VB2_BUF_STATE_ERROR; buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; buf->vb.v4l2_buf.sequence = usbtv->sequence++; v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); vb2_set_plane_payload(&buf->vb, 0, size); - vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); + vb2_buffer_done(&buf->vb, state); list_del(&buf->list); } @@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq, if (*nbuffers < 2) *nbuffers = 2; *nplanes = 1; - sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32); + sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); return 0; } diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dbdc5f7e2b29..01e264fb50e0 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus) /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) { - struct pci_dev * pci_dev; - u64 addr; + struct pci_dev *pci_dev = to_pci_dev(dev); + bool is_bridge; + u64 addr; - pci_dev = to_pci_dev(dev); + /* + * pci_is_bridge() is not suitable here, because pci_dev->subordinate + * is set only after acpi_pci_find_device() has been called for the + * given device. + */ + is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE + || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); + *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); if (!*handle) return -ENODEV; return 0; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 17150a778984..451bf99582ff 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) rc = cqr->intrc; else rc = -EIO; + + /* kick tasklets */ + dasd_schedule_device_bh(device); + if (device->block) + dasd_schedule_block_bh(device->block); + return rc; } diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index b6d1f92ed33c..c18c68150e9f 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -38,7 +38,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.22" +#define DRV_VERSION "1.5.0.23" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 5f09d1814d26..42e15ee6e1bb 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); INIT_WORK(&fnic->event_work, fnic_handle_event); skb_queue_head_init(&fnic->fip_frame_queue); - spin_lock_irqsave(&fnic_list_lock, flags); - if (!fnic_fip_queue) { - fnic_fip_queue = - create_singlethread_workqueue("fnic_fip_q"); - if (!fnic_fip_queue) { - spin_unlock_irqrestore(&fnic_list_lock, flags); - printk(KERN_ERR PFX "fnic FIP work queue " - "create failed\n"); - err = -ENOMEM; - goto err_out_free_max_pool; - } - } - spin_unlock_irqrestore(&fnic_list_lock, flags); INIT_LIST_HEAD(&fnic->evlist); INIT_LIST_HEAD(&fnic->vlans); } else { @@ -960,6 +947,13 @@ static int __init fnic_init_module(void) spin_lock_init(&fnic_list_lock); INIT_LIST_HEAD(&fnic_list); + fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + printk(KERN_ERR PFX "fnic FIP 
work queue create failed\n"); + err = -ENOMEM; + goto err_create_fip_workq; + } + fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); if (!fnic_fc_transport) { printk(KERN_ERR PFX "fc_attach_transport error\n"); @@ -978,6 +972,8 @@ static int __init fnic_init_module(void) err_pci_register: fc_release_transport(fnic_fc_transport); err_fc_transport: + destroy_workqueue(fnic_fip_queue); +err_create_fip_workq: destroy_workqueue(fnic_event_queue); err_create_fnic_workq: kmem_cache_destroy(fnic_io_req_cache); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0177295599e0..1f0ca68409d4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance) break; } - /* - * We expect the FW state to be READY - */ - if (megasas_transition_to_ready(instance, 0)) - goto fail_ready_state; + if (megasas_transition_to_ready(instance, 0)) { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + dev_info(&instance->pdev->dev, + "megasas: FW restarted successfully from %s!\n", + __func__); + + /*waitting for about 30 second before retry*/ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } /* * MSI-X host index 0 is common for all adapter. diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3b1ea34e1f5a..eaa808e6ba91 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, { int i, result; + if (sdev->skip_vpd_pages) + goto fail; + /* Ask for all the pages supported by this device */ result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); if (result) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 2168258fb2c3..74b88efde6ad 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) vscsi->affinity_hint_set = true; } else { - for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++) + for (i = 0; i < vscsi->num_queues; i++) virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); vscsi->affinity_hint_set = false; diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 222d3e37fc28..707966bd5610 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) else buf = (void *)t->tx_buf; t->tx_dma = dma_map_single(&spi->dev, buf, - t->len, DMA_FROM_DEVICE); + t->len, DMA_TO_DEVICE); if (!t->tx_dma) { ret = -EFAULT; goto err_tx_map; diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index dcceed29d31a..81972fa47beb 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1811,10 +1811,12 @@ static int zcache_comp_init(void) #else if (*zcache_comp_name != '\0') { ret = crypto_has_comp(zcache_comp_name, 0, 0); - if (!ret) + if (!ret) { pr_info("zcache: %s not supported\n", zcache_comp_name); - goto out; + ret = 1; + goto out; + } } if (!ret) strcpy(zcache_comp_name, "lzo"); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 4a8a1d68002c..558313de4911 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4798,7 +4798,8 
@@ static void hub_events(void) hub->ports[i - 1]->child; dev_dbg(hub_dev, "warm reset port %d\n", i); - if (!udev) { + if (!udev || !(portstatus & + USB_PORT_STAT_CONNECTION)) { status = hub_port_reset(hub, i, NULL, HUB_BH_RESET_TIME, true); @@ -4808,8 +4809,8 @@ static void hub_events(void) usb_lock_device(udev); status = usb_reset_device(udev); usb_unlock_device(udev); + connect_change = 0; } - connect_change = 0; } if (connect_change) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index df6978abd7e6..6f8c2fd47675 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -24,6 +24,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/dmapool.h> +#include <linux/dma-mapping.h> #include "xhci.h" diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 41eb4fc33453..9478caa2f71f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -27,6 +27,7 @@ #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/dmi.h> +#include <linux/dma-mapping.h> #include "xhci.h" diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 3ba37713b1f9..dc09ebe4aba5 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -239,24 +239,6 @@ static const struct fb_bitfield def_rgb565[] = { } }; -static const struct fb_bitfield def_rgb666[] = { - [RED] = { - .offset = 16, - .length = 6, - }, - [GREEN] = { - .offset = 8, - .length = 6, - }, - [BLUE] = { - .offset = 0, - .length = 6, - }, - [TRANSP] = { /* no support for transparency */ - .length = 0, - } -}; - static const struct fb_bitfield def_rgb888[] = { [RED] = { .offset = 16, @@ -309,9 +291,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var, break; case STMLCDIF_16BIT: case STMLCDIF_18BIT: - /* 24 bit to 18 bit mapping */ - rgb = def_rgb666; - break; case STMLCDIF_24BIT: /* real 24 bit */ rgb = def_rgb888; @@ -453,11 +432,6 @@ static int mxsfb_set_par(struct fb_info *fb_info) return -EINVAL; case STMLCDIF_16BIT: case STMLCDIF_18BIT: - /* 24 bit to 18 bit mapping */ - ctrl |= CTRL_DF24; /* ignore the upper 2 bits in - * each colour component - */ - break; case STMLCDIF_24BIT: /* real 24 bit */ break; diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c index 5338f362293b..1b60698f141e 100644 --- a/drivers/video/omap2/displays-new/connector-analog-tv.c +++ b/drivers/video/omap2/displays-new/connector-analog-tv.c @@ -28,6 +28,20 @@ struct panel_drv_data { bool invert_polarity; }; +static const struct omap_video_timings tvc_pal_timings = { + .x_res = 720, + .y_res = 574, + .pixel_clock = 13500, + .hsw = 64, + .hfp = 12, + .hbp = 68, + .vsw = 5, + .vfp = 5, + .vbp = 41, + + .interlace = true, +}; + #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev) return -ENODEV; } - ddata->timings = omap_dss_pal_timings; + ddata->timings = tvc_pal_timings; dssdev = &ddata->dssdev; dssdev->driver = &tvc_driver; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; - dssdev->panel.timings = omap_dss_pal_timings; + dssdev->panel.timings = tvc_pal_timings; r = omapdss_register_display(dssdev); if (r) { |