Diffstat (limited to 'drivers/thermal/intel/intel_powerclamp.c')
-rw-r--r-- | drivers/thermal/intel/intel_powerclamp.c | 55 |
1 files changed, 15 insertions, 40 deletions
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 53216dcbe173..b80e25ec1261 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -62,17 +62,13 @@ static struct dentry *debug_dir;
 static unsigned int set_target_ratio;
 static unsigned int current_ratio;
 static bool should_skip;
-static bool reduce_irq;
-static atomic_t idle_wakeup_counter;
+
 static unsigned int control_cpu; /* The cpu assigned to collect stat and update
                                   * control parameters. default to BSP but BSP
                                   * can be offlined.
                                   */
 static bool clamping;
 
-static const struct sched_param sparam = {
-        .sched_priority = MAX_USER_RT_PRIO / 2,
-};
 struct powerclamp_worker_data {
         struct kthread_worker *worker;
         struct kthread_work balancing_work;
@@ -288,9 +284,6 @@ static unsigned int get_compensation(int ratio)
                         cal_data[ratio + 1].steady_comp) / 3;
         }
 
-        /* REVISIT: simple penalty of double idle injection */
-        if (reduce_irq)
-                comp = ratio;
         /* do not exceed limit */
         if (comp + ratio >= MAX_TARGET_RATIO)
                 comp = MAX_TARGET_RATIO - ratio - 1;
@@ -304,13 +297,9 @@ static void adjust_compensation(int target_ratio, unsigned int win)
         struct powerclamp_calibration_data *d = &cal_data[target_ratio];
 
         /*
-         * adjust compensations if confidence level has not been reached or
-         * there are too many wakeups during the last idle injection period, we
-         * cannot trust the data for compensation.
+         * adjust compensations if confidence level has not been reached.
          */
-        if (d->confidence >= CONFIDENCE_OK ||
-            atomic_read(&idle_wakeup_counter) >
-            win * num_online_cpus())
+        if (d->confidence >= CONFIDENCE_OK)
                 return;
 
         delta = set_target_ratio - current_ratio;
@@ -350,14 +339,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
         tsc_last = tsc_now;
 
         adjust_compensation(target_ratio, win);
-        /*
-         * too many external interrupts, set flag such
-         * that we can take measure later.
-         */
-        reduce_irq = atomic_read(&idle_wakeup_counter) >=
-                        2 * win * num_online_cpus();
-        atomic_set(&idle_wakeup_counter, 0);
 
         /* if we are above target+guard, skip */
         return set_target_ratio + guard <= current_ratio;
 }
@@ -488,7 +470,7 @@ static void start_power_clamp_worker(unsigned long cpu)
         w_data->cpu = cpu;
         w_data->clamping = true;
         set_bit(cpu, cpu_clamping_mask);
-        sched_setscheduler(worker->task, SCHED_FIFO, &sparam);
+        sched_set_fifo(worker->task);
         kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
         kthread_init_delayed_work(&w_data->idle_injection_work,
                                   clamp_idle_injection_func);
@@ -531,12 +513,10 @@ static int start_power_clamp(void)
         set_target_ratio = clamp(set_target_ratio, 0U,
                                  MAX_TARGET_RATIO - 1);
         /* prevent cpu hotplug */
-        get_online_cpus();
+        cpus_read_lock();
 
         /* prefer BSP */
-        control_cpu = 0;
-        if (!cpu_online(control_cpu))
-                control_cpu = smp_processor_id();
+        control_cpu = cpumask_first(cpu_online_mask);
 
         clamping = true;
         schedule_delayed_work(&poll_pkg_cstate_work, 0);
@@ -545,7 +525,7 @@ static int start_power_clamp(void)
         for_each_online_cpu(cpu) {
                 start_power_clamp_worker(cpu);
         }
-        put_online_cpus();
+        cpus_read_unlock();
 
         return 0;
 }
@@ -559,12 +539,9 @@ static void end_power_clamp(void)
          * stop faster.
          */
         clamping = false;
-        if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
-                for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
-                        pr_debug("clamping worker for cpu %d alive, destroy\n",
-                                 i);
-                        stop_power_clamp_worker(i);
-                }
+        for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+                pr_debug("clamping worker for cpu %d alive, destroy\n", i);
+                stop_power_clamp_worker(i);
         }
 }
 
@@ -644,14 +621,14 @@ exit_set:
 }
 
 /* bind to generic thermal layer as cooling device*/
-static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+static const struct thermal_cooling_device_ops powerclamp_cooling_ops = {
         .get_max_state = powerclamp_get_max_state,
         .get_cur_state = powerclamp_get_cur_state,
         .set_cur_state = powerclamp_set_cur_state,
 };
 
 static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
-        { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+        X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_MWAIT, NULL),
         {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -708,10 +685,8 @@ static enum cpuhp_state hp_state;
 static int __init powerclamp_init(void)
 {
         int retval;
-        int bitmap_size;
 
-        bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
-        cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
+        cpu_clamping_mask = bitmap_zalloc(num_possible_cpus(), GFP_KERNEL);
         if (!cpu_clamping_mask)
                 return -ENOMEM;
 
@@ -756,7 +731,7 @@ exit_free_thread:
 exit_unregister:
         cpuhp_remove_state_nocalls(hp_state);
 exit_free:
-        kfree(cpu_clamping_mask);
+        bitmap_free(cpu_clamping_mask);
         return retval;
 }
 module_init(powerclamp_init);
@@ -767,7 +742,7 @@ static void __exit powerclamp_exit(void)
         cpuhp_remove_state_nocalls(hp_state);
         free_percpu(worker_data);
         thermal_cooling_device_unregister(cooling_dev);
-        kfree(cpu_clamping_mask);
+        bitmap_free(cpu_clamping_mask);
 
         cancel_delayed_work_sync(&poll_pkg_cstate_work);
         debugfs_remove_recursive(debug_dir);
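
As context for the interfaces this patch switches to, below is a minimal, hypothetical kernel-module sketch, not taken from intel_powerclamp.c, that exercises bitmap_zalloc()/bitmap_free(), cpus_read_lock()/cpus_read_unlock(), cpumask_first(cpu_online_mask) and sched_set_fifo(). All example_* names are invented for illustration; only the kernel APIs named here are real.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only; not part of the intel_powerclamp commit.
 * All example_* identifiers are hypothetical.
 */
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static unsigned long *example_mask;             /* hypothetical CPU tracking bitmap */
static struct task_struct *example_task;        /* hypothetical worker thread */

static int example_thread_fn(void *data)
{
        /* Idle loop until kthread_stop() is called at module exit. */
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static int __init example_init(void)
{
        unsigned int control_cpu, cpu;

        /* bitmap_zalloc() takes a bit count and zeroes the allocation,
         * replacing the open-coded BITS_TO_LONGS() * sizeof(long) + kzalloc(). */
        example_mask = bitmap_zalloc(num_possible_cpus(), GFP_KERNEL);
        if (!example_mask)
                return -ENOMEM;

        /* cpus_read_lock()/cpus_read_unlock() are the current names for the
         * CPU-hotplug read lock formerly taken via get_online_cpus(). */
        cpus_read_lock();

        /* cpumask_first(cpu_online_mask) yields the lowest-numbered online CPU,
         * the idiom the patch now uses to pick control_cpu. */
        control_cpu = cpumask_first(cpu_online_mask);

        for_each_online_cpu(cpu)
                set_bit(cpu, example_mask);

        example_task = kthread_create_on_cpu(example_thread_fn, NULL,
                                             control_cpu, "example/%u");
        if (!IS_ERR(example_task)) {
                /* sched_set_fifo() replaces the open-coded
                 * sched_setscheduler(task, SCHED_FIFO, &sparam) call. */
                sched_set_fifo(example_task);
                wake_up_process(example_task);
        }
        cpus_read_unlock();
        return 0;
}

static void __exit example_exit(void)
{
        if (!IS_ERR_OR_NULL(example_task))
                kthread_stop(example_task);
        bitmap_free(example_mask);      /* pairs with bitmap_zalloc() */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The sketch mirrors the patch's direction: let the bitmap and scheduler helpers pick sizes and priorities instead of hand-rolling them, and use the read-side hotplug lock under its current name.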