author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-03 12:04:39 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-03 12:04:39 -0800
commit     7d3b56ba37a95f1f370f50258ed3954c304c524b (patch)
tree       86102527b92f02450aa245f084ffb491c18d2e0a /drivers
parent     Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu (diff)
parent     x86: setup_per_cpu_areas() cleanup (diff)
Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
  x86: setup_per_cpu_areas() cleanup
  cpumask: fix compile error when CONFIG_NR_CPUS is not defined
  cpumask: use alloc_cpumask_var_node where appropriate
  cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
  x86: use cpumask_var_t in acpi/boot.c
  x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
  sched: put back some stack hog changes that were undone in kernel/sched.c
  x86: enable cpus display of kernel_max and offlined cpus
  ia64: cpumask fix for is_affinity_mask_valid()
  cpumask: convert RCU implementations, fix
  xtensa: define __fls
  mn10300: define __fls
  m32r: define __fls
  h8300: define __fls
  frv: define __fls
  cris: define __fls
  cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
  cpumask: zero extra bits in alloc_cpumask_var_node
  cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
  cpumask: convert mm/
  ...
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/acpi/processor_core.c                   14
-rw-r--r--   drivers/acpi/processor_perflib.c                28
-rw-r--r--   drivers/acpi/processor_throttling.c             80
-rw-r--r--   drivers/base/cpu.c                              44
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_irq.c           17
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_file_ops.c     8
-rw-r--r--   drivers/pnp/pnpbios/bioscalls.c                  2
7 files changed, 135 insertions, 58 deletions
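All seven files below get the same mechanical treatment: fixed-size cpumask_t values and the old cpu_*/cpus_* operations are replaced with cpumask_var_t plus the cpumask_* accessors, so each mask only occupies nr_cpu_ids bits and moves off the stack when CONFIG_CPUMASK_OFFSTACK=y. A rough, hedged sketch of the target pattern (illustrative only, not taken from the patch; the function name is made up), with the old calls noted in comments:

        #include <linux/cpumask.h>
        #include <linux/errno.h>
        #include <linux/gfp.h>

        static int cpumask_var_example(void)
        {
                cpumask_var_t mask;                     /* was: cpumask_t mask; */
                int cpu, n = 0;

                /* kmallocs nr_cpu_ids bits with CONFIG_CPUMASK_OFFSTACK=y,
                 * otherwise a no-op that always succeeds */
                if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                        return -ENOMEM;

                cpumask_clear(mask);                    /* was: cpus_clear(mask) */
                cpumask_set_cpu(0, mask);               /* was: cpu_set(0, mask) */

                if (cpumask_test_cpu(0, mask))          /* was: cpu_isset(0, mask) */
                        cpumask_and(mask, mask, cpu_online_mask); /* was: cpus_and(.., cpu_online_map) */

                for_each_cpu(cpu, mask)                 /* was: for_each_cpu_mask_nr(cpu, mask) */
                        n++;

                free_cpumask_var(mask);                 /* required with cpumask_var_t */
                return n;
        }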
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 34948362f41d..0cc2fd31e376 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device)
if (!pr)
return -ENOMEM;
+ if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+ kfree(pr);
+ return -ENOMEM;
+ }
+
pr->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
@@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
pr = acpi_driver_data(device);
- if (pr->id >= nr_cpu_ids) {
- kfree(pr);
- return 0;
- }
+ if (pr->id >= nr_cpu_ids)
+ goto free;
if (type == ACPI_BUS_REMOVAL_EJECT) {
if (acpi_processor_handle_eject(pr))
@@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
per_cpu(processors, pr->id) = NULL;
per_cpu(processor_device_array, pr->id) = NULL;
+
+free:
+ free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);
return 0;
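One consequence of shared_cpu_map becoming a cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK=y the mask is a separate allocation, so kfree() of the containing structure no longer releases it, which is why acpi_processor_add() gains the allocation and acpi_processor_remove() frees the mask on every exit path via the new "free:" label. A minimal sketch of that lifecycle, using a hypothetical struct foo rather than the ACPI structures:

        #include <linux/cpumask.h>
        #include <linux/slab.h>

        struct foo {
                cpumask_var_t mask;             /* was: cpumask_t mask; */
        };

        static struct foo *foo_alloc(void)
        {
                struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

                if (!f)
                        return NULL;
                if (!alloc_cpumask_var(&f->mask, GFP_KERNEL)) {
                        kfree(f);               /* undo on allocation failure */
                        return NULL;
                }
                return f;
        }

        static void foo_free(struct foo *f)
        {
                free_cpumask_var(f->mask);      /* must come before freeing the struct */
                kfree(f);
        }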
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0d7b772bef50..846e227592d4 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -588,12 +588,15 @@ int acpi_processor_preregister_performance(
int count, count_target;
int retval = 0;
unsigned int i, j;
- cpumask_t covered_cpus;
+ cpumask_var_t covered_cpus;
struct acpi_processor *pr;
struct acpi_psd_package *pdomain;
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
+ if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
mutex_lock(&performance_mutex);
retval = 0;
@@ -617,7 +620,7 @@ int acpi_processor_preregister_performance(
}
pr->performance = percpu_ptr(performance, i);
- cpu_set(i, pr->performance->shared_cpu_map);
+ cpumask_set_cpu(i, pr->performance->shared_cpu_map);
if (acpi_processor_get_psd(pr)) {
retval = -EINVAL;
continue;
@@ -650,18 +653,18 @@ int acpi_processor_preregister_performance(
}
}
- cpus_clear(covered_cpus);
+ cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
- if (cpu_isset(i, covered_cpus))
+ if (cpumask_test_cpu(i, covered_cpus))
continue;
pdomain = &(pr->performance->domain_info);
- cpu_set(i, pr->performance->shared_cpu_map);
- cpu_set(i, covered_cpus);
+ cpumask_set_cpu(i, pr->performance->shared_cpu_map);
+ cpumask_set_cpu(i, covered_cpus);
if (pdomain->num_processors <= 1)
continue;
@@ -699,8 +702,8 @@ int acpi_processor_preregister_performance(
goto err_ret;
}
- cpu_set(j, covered_cpus);
- cpu_set(j, pr->performance->shared_cpu_map);
+ cpumask_set_cpu(j, covered_cpus);
+ cpumask_set_cpu(j, pr->performance->shared_cpu_map);
count++;
}
@@ -718,8 +721,8 @@ int acpi_processor_preregister_performance(
match_pr->performance->shared_type =
pr->performance->shared_type;
- match_pr->performance->shared_cpu_map =
- pr->performance->shared_cpu_map;
+ cpumask_copy(match_pr->performance->shared_cpu_map,
+ pr->performance->shared_cpu_map);
}
}
@@ -731,14 +734,15 @@ err_ret:
/* Assume no coordination on any error parsing domain info */
if (retval) {
- cpus_clear(pr->performance->shared_cpu_map);
- cpu_set(i, pr->performance->shared_cpu_map);
+ cpumask_clear(pr->performance->shared_cpu_map);
+ cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
}
pr->performance = NULL; /* Will be set for real in register */
}
mutex_unlock(&performance_mutex);
+ free_cpumask_var(covered_cpus);
return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
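The shared_cpu_map assignment also had to change: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t member is just a pointer, so a plain struct-member assignment would alias the two masks (and leak one allocation) rather than copy the bits. A hedged sketch of the distinction, with a hypothetical struct perf standing in for acpi_processor_performance:

        #include <linux/cpumask.h>

        struct perf {
                cpumask_var_t shared;   /* pointer-sized when CONFIG_CPUMASK_OFFSTACK=y */
        };

        static void share_map(struct perf *dst, const struct perf *src)
        {
                /*
                 * dst->shared = src->shared;  -- on off-stack configs this would
                 * merely copy the pointer, leaking dst's bitmap and setting up a
                 * double free of src's.
                 */
                cpumask_copy(dst->shared, src->shared); /* copies the bits themselves */
        }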
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a0c38c94a8a0..d27838171f4a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
int count, count_target;
int retval = 0;
unsigned int i, j;
- cpumask_t covered_cpus;
+ cpumask_var_t covered_cpus;
struct acpi_processor *pr, *match_pr;
struct acpi_tsd_package *pdomain, *match_pdomain;
struct acpi_processor_throttling *pthrottling, *match_pthrottling;
+ if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
/*
* Now that we have _TSD data from all CPUs, lets setup T-state
* coordination between all CPUs.
@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
if (retval)
goto err_ret;
- cpus_clear(covered_cpus);
+ cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
- if (cpu_isset(i, covered_cpus))
+ if (cpumask_test_cpu(i, covered_cpus))
continue;
pthrottling = &pr->throttling;
pdomain = &(pthrottling->domain_info);
- cpu_set(i, pthrottling->shared_cpu_map);
- cpu_set(i, covered_cpus);
+ cpumask_set_cpu(i, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(i, covered_cpus);
/*
* If the number of processor in the TSD domain is 1, it is
* unnecessary to parse the coordination for this CPU.
@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
goto err_ret;
}
- cpu_set(j, covered_cpus);
- cpu_set(j, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(j, covered_cpus);
+ cpumask_set_cpu(j, pthrottling->shared_cpu_map);
count++;
}
for_each_possible_cpu(j) {
@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
* If some CPUS have the same domain, they
* will have the same shared_cpu_map.
*/
- match_pthrottling->shared_cpu_map =
- pthrottling->shared_cpu_map;
+ cpumask_copy(match_pthrottling->shared_cpu_map,
+ pthrottling->shared_cpu_map);
}
}
err_ret:
+ free_cpumask_var(covered_cpus);
+
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
@@ -182,8 +187,8 @@ err_ret:
*/
if (retval) {
pthrottling = &(pr->throttling);
- cpus_clear(pthrottling->shared_cpu_map);
- cpu_set(i, pthrottling->shared_cpu_map);
+ cpumask_clear(pthrottling->shared_cpu_map);
+ cpumask_set_cpu(i, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
}
@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 1;
pthrottling->shared_type = pdomain->coord_type;
- cpu_set(pr->id, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
/*
* If the coordination type is not defined in ACPI spec,
* the tsd_valid_flag will be clear and coordination type
@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
- cpumask_t saved_mask;
+ cpumask_var_t saved_mask;
int ret;
if (!pr)
@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;
+
+ if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+ return -ENOMEM;
+
/*
* Migrate task to the cpu pointed by pr.
*/
- saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ cpumask_copy(saved_mask, &current->cpus_allowed);
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */
- set_cpus_allowed_ptr(current, &saved_mask);
+ set_cpus_allowed_ptr(current, saved_mask);
+ free_cpumask_var(saved_mask);
return ret;
}
@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
- cpumask_t saved_mask;
+ cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
struct throttling_tstate t_state;
- cpumask_t online_throttling_cpus;
+ cpumask_var_t online_throttling_cpus;
if (!pr)
return -EINVAL;
@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- saved_mask = current->cpus_allowed;
+ if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+ free_cpumask_var(saved_mask);
+ return -ENOMEM;
+ }
+
+ cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
- cpus_and(online_throttling_cpus, cpu_online_map,
- p_throttling->shared_cpu_map);
+ cpumask_and(online_throttling_cpus, cpu_online_mask,
+ p_throttling->shared_cpu_map);
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ for_each_cpu(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state);
} else {
@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ for_each_cpu(i, online_throttling_cpus) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue;
}
t_state.cpu = i;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(i));
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state);
@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ for_each_cpu(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
/* restore the previous state */
- set_cpus_allowed_ptr(current, &saved_mask);
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, saved_mask);
+ free_cpumask_var(online_throttling_cpus);
+ free_cpumask_var(saved_mask);
return ret;
}
@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
if (acpi_processor_get_tsd(pr)) {
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 0;
- cpu_set(pr->id, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
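The new "FIXME: use work_on_cpu()" comments flag the remaining pattern of rebinding current with set_cpus_allowed_ptr() just to run a callback on a specific CPU. A hedged sketch of what the FIXME seems to suggest for the get-throttling path, assuming the work_on_cpu(cpu, fn, arg) helper it names; the wrapper names here are made up and are not part of this patch:

        #include <linux/workqueue.h>
        #include <acpi/processor.h>

        static long __get_throttling_on_cpu(void *data)
        {
                struct acpi_processor *pr = data;

                return pr->throttling.acpi_processor_get_throttling(pr);
        }

        static long example_get_throttling(struct acpi_processor *pr)
        {
                /* Runs the callback from a kworker bound to pr->id, so current's
                 * cpus_allowed no longer needs to be saved and restored. */
                return work_on_cpu(pr->id, __get_throttling_on_cpu, pr);
        }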
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4259072f5bd0..719ee5c1c8d9 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -128,10 +128,54 @@ print_cpus_func(online);
print_cpus_func(possible);
print_cpus_func(present);
+/*
+ * Print values for NR_CPUS and offlined cpus
+ */
+static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf)
+{
+ int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
+ return n;
+}
+static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+
+/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
+unsigned int total_cpus;
+
+static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
+{
+ int n = 0, len = PAGE_SIZE-2;
+ cpumask_var_t offline;
+
+ /* display offline cpus < nr_cpu_ids */
+ if (!alloc_cpumask_var(&offline, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_complement(offline, cpu_online_mask);
+ n = cpulist_scnprintf(buf, len, offline);
+ free_cpumask_var(offline);
+
+ /* display offline cpus >= nr_cpu_ids */
+ if (total_cpus && nr_cpu_ids < total_cpus) {
+ if (n && n < len)
+ buf[n++] = ',';
+
+ if (nr_cpu_ids == total_cpus-1)
+ n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
+ else
+ n += snprintf(&buf[n], len - n, "%d-%d",
+ nr_cpu_ids, total_cpus-1);
+ }
+
+ n += snprintf(&buf[n], len - n, "\n");
+ return n;
+}
+static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
+
static struct sysdev_class_attribute *cpu_state_attr[] = {
&attr_online_map,
&attr_possible_map,
&attr_present_map,
+ &attr_kernel_max,
+ &attr_offline,
};
static int cpu_states_init(void)
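Because these are class attributes of the cpu sysdev class, they should surface as /sys/devices/system/cpu/kernel_max (printing NR_CPUS - 1) and /sys/devices/system/cpu/offline (a cpulist of offlined CPUs, extended with the nr_cpu_ids through total_cpus-1 range when an architecture sets total_cpus). A small, hypothetical userspace check of the two files:

        #include <stdio.h>

        int main(void)
        {
                const char *files[] = {
                        "/sys/devices/system/cpu/kernel_max",
                        "/sys/devices/system/cpu/offline",
                };
                char line[256];

                for (int i = 0; i < 2; i++) {
                        FILE *f = fopen(files[i], "r");

                        if (f && fgets(line, sizeof(line), f))
                                printf("%s: %s", files[i], line);
                        if (f)
                                fclose(f);
                }
                return 0;
        }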
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 757035ea246f..3128a5090dbd 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -659,12 +659,12 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
WARN_ON_ONCE(!in_interrupt());
if (ehca_debug_level >= 3)
- ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+ ehca_dmp(cpu_online_mask, cpumask_size(), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
- cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+ cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
if (cpu >= nr_cpu_ids)
- cpu = first_cpu(cpu_online_map);
+ cpu = cpumask_first(cpu_online_mask);
pool->last_cpu = cpu;
spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
@@ -855,7 +855,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
case CPU_UP_CANCELED_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+ kthread_bind(cct->task, cpumask_any(cpu_online_mask));
destroy_comp_task(pool, cpu);
break;
case CPU_ONLINE:
@@ -902,7 +902,7 @@ int ehca_create_comp_pool(void)
return -ENOMEM;
spin_lock_init(&pool->last_cpu_lock);
- pool->last_cpu = any_online_cpu(cpu_online_map);
+ pool->last_cpu = cpumask_any(cpu_online_mask);
pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
if (pool->cpu_comp_tasks == NULL) {
@@ -934,10 +934,9 @@ void ehca_destroy_comp_pool(void)
unregister_hotcpu_notifier(&comp_pool_callback_nb);
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i))
- destroy_comp_task(pool, i);
- }
+ for_each_online_cpu(i)
+ destroy_comp_task(pool, i);
+
free_percpu(pool->cpu_comp_tasks);
kfree(pool);
}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 239d4e8068ac..23173982b32c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1679,7 +1679,7 @@ static int find_best_unit(struct file *fp,
* InfiniPath chip to that processor (we assume reasonable connectivity,
* for now). This code assumes that if affinity has been set
* before this point, that at most one cpu is set; for now this
- * is reasonable. I check for both cpus_empty() and cpus_full(),
+ * is reasonable. I check for both cpumask_empty() and cpumask_full(),
+ * is reasonable. I check for both cpumask_empty() and cpumask_full(),
* in case some kernel variant sets none of the bits when no
* affinity is set. 2.6.11 and 12 kernels have all present
* cpus set. Some day we'll have to fix it up further to handle
@@ -1688,11 +1688,11 @@ static int find_best_unit(struct file *fp,
* information. There may be some issues with dual core numbering
* as well. This needs more work prior to release.
*/
- if (!cpus_empty(current->cpus_allowed) &&
- !cpus_full(current->cpus_allowed)) {
+ if (!cpumask_empty(&current->cpus_allowed) &&
+ !cpumask_full(&current->cpus_allowed)) {
int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
for (i = 0; i < ncpus; i++)
- if (cpu_isset(i, current->cpus_allowed)) {
+ if (cpumask_test_cpu(i, &current->cpus_allowed)) {
ipath_cdbg(PROC, "%s[%u] affinity set for "
"cpu %d/%d\n", current->comm,
current->pid, i, ncpus);
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 7ff824496b39..7e6b5a3b3281 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -481,7 +481,7 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_possible_cpu(i) {
struct desc_struct *gdt = get_cpu_gdt_table(i);
if (!gdt)
continue;