| author | Rusty Russell <rusty@rustcorp.com.au> | 2008-12-26 22:23:40 +1030 |
|---|---|---|
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2008-12-26 22:23:40 +1030 |
| commit | fbb776c3ca4501d5a2821bf1e9bceefcaec7ae47 (patch) | |
| tree | ef2b0bba2d31a6408224b4bedcd518b5e5ed6e5c /arch/ia64/kernel | |
| parent | 86c6f274f52c3e991d429869780945c0790e7b65 (diff) | |
cpumask: IA64: Introduce cpumask_of_{node,pcibus} to replace {node,pcibus}_to_cpumask
Impact: New APIs
The old node_to_cpumask/pcibus_to_cpumask returned a cpumask_t by value;
the new cpumask_of_node/cpumask_of_pcibus return a pointer to a
struct cpumask. Part of removing cpumasks from the stack.
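For illustration only, a minimal caller-side sketch of the change described above. The helper name `example_online_cpus_on_node()` is hypothetical and not part of this patch; only the cpumask calls it uses are taken from the kernel APIs involved.

```c
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper: count the online CPUs attached to a NUMA node. */
static int example_online_cpus_on_node(int node)
{
	/*
	 * Old style (being replaced): node_to_cpumask() returned a whole
	 * cpumask_t by value, which lands on the kernel stack:
	 *
	 *	cpumask_t mask = node_to_cpumask(node);
	 *	return cpus_weight(mask);
	 */

	/* New style: cpumask_of_node() hands back a pointer to a shared,
	 * read-only struct cpumask, so nothing is copied to the stack. */
	const struct cpumask *mask = cpumask_of_node(node);

	return cpumask_weight(mask);
}
```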
We can also use the new for_each_cpu_and() to avoid both a temporary
cpumask and a gratuitous test in sn_topology_show.
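A small sketch, again hypothetical, of the for_each_cpu_and() pattern mentioned above: it walks the intersection of two masks directly, so no cpus_and() into a temporary cpumask_t is needed.

```c
#include <linux/cpumask.h>

/* Hypothetical: count CPUs that appear in both 'a' and 'b'. */
static int example_count_common(const struct cpumask *a,
				const struct cpumask *b)
{
	int cpu, n = 0;

	/*
	 * Old pattern:
	 *	cpumask_t tmp;		// temporary mask on the stack
	 *	cpus_and(tmp, *a, *b);
	 *	n = cpus_weight(tmp);
	 */

	/* New pattern: iterate the intersection without a temporary. */
	for_each_cpu_and(cpu, a, b)
		n++;

	return n;
}
```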
(Includes fix from KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>)
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Diffstat (limited to 'arch/ia64/kernel')
| -rw-r--r-- | arch/ia64/kernel/acpi.c | 2 |
| -rw-r--r-- | arch/ia64/kernel/iosapic.c | 23 |
2 files changed, 12 insertions, 13 deletions
```diff
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bd7acc71e8a9..54ae373e6e22 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -1001,7 +1001,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c8adecd5b416..5cfd3d91001a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 
```
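One detail worth noting in the hunks above is the switch from NR_CPUS to nr_cpu_ids when testing whether the iteration found a CPU: the new iterators leave the loop variable at or beyond nr_cpu_ids when they run off the end of the mask. A minimal, hypothetical sketch of that convention (not part of the patch; the helper name is invented):

```c
#include <linux/cpumask.h>

/* Hypothetical: return the first online CPU in 'mask', or -1 if none.
 * Shows why "did the loop find anything?" is checked against nr_cpu_ids. */
static int example_first_online(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		break;	/* stop at the first hit */

	/* If nothing matched, 'cpu' is now >= nr_cpu_ids. */
	return cpu < nr_cpu_ids ? cpu : -1;
}
```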