Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  35
1 file changed, 24 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6fedf3a98581..d8d76a65cfdd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -24,7 +24,7 @@
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
/*
* Debugging: various feature bits
*
@@ -396,6 +396,18 @@ static bool set_nr_if_polling(struct task_struct *p)
#endif
#endif
+/**
+ * wake_q_add() - queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
struct wake_q_node *node = &task->wake_q;
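
The new kerneldoc above describes the deferred-wakeup contract of the wake_q API. As background, a minimal sketch of the usual pattern, queueing wakeups under a lock and issuing them after the lock is dropped, could look like the following; the 'struct waiter' type and the wake_all_waiters() helper are made up for illustration and are not part of this commit.

#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct waiter {				/* hypothetical example structure */
	struct list_head list;
	struct task_struct *task;
};

static void wake_all_waiters(spinlock_t *lock, struct list_head *waiters)
{
	DEFINE_WAKE_Q(wake_q);
	struct waiter *w;

	spin_lock(lock);
	list_for_each_entry(w, waiters, list)
		wake_q_add(&wake_q, w->task);	/* the wakeup may happen instantly, see above */
	spin_unlock(lock);

	wake_up_q(&wake_q);			/* issue the deferred wakeups */
}
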
@@ -405,10 +417,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
* its already queued (either by us or someone else) and will get the
* wakeup due to that.
*
- * This cmpxchg() executes a full barrier, which pairs with the full
- * barrier executed by the wakeup in wake_up_q().
+ * In order to ensure that a pending wakeup will observe our pending
+ * state, even in the failed case, an explicit smp_mb() must be used.
*/
- if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+ smp_mb__before_atomic();
+ if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
return;
get_task_struct(task);
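
The change above hinges on the fact that a failed cmpxchg() implies no memory barrier, while a successful one is fully ordered. The general shape of such a fix, sketched here with made-up 'pending'/'claimed' variables rather than the wake_q internals, is:

#include <linux/atomic.h>
#include <linux/compiler.h>

static atomic_t claimed = ATOMIC_INIT(0);
static int pending;

static void publish_and_claim(void)
{
	WRITE_ONCE(pending, 1);		/* store that must be observable by others */
	smp_mb__before_atomic();	/* order it before the cmpxchg, success or failure */
	if (atomic_cmpxchg_relaxed(&claimed, 0, 1) != 0)
		return;			/* lost the race, but 'pending' is still ordered */

	/* won the race: act on 'pending' here */
}
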
@@ -697,7 +710,7 @@ static void set_load_weight(struct task_struct *p, bool update_load)
/*
* SCHED_IDLE tasks get minimal weight:
*/
- if (idle_policy(p->policy)) {
+ if (task_has_idle_policy(p)) {
load->weight = scale_load(WEIGHT_IDLEPRIO);
load->inv_weight = WMULT_IDLEPRIO;
p->se.runnable_weight = load->weight;
@@ -2857,7 +2870,7 @@ unsigned long nr_running(void)
* preemption, thus the result might have a time-of-check-to-time-of-use
* race. The caller is responsible to use it correctly, for example:
*
- * - from a non-preemptable section (of course)
+ * - from a non-preemptible section (of course)
*
* - from a thread that is bound to a single CPU
*
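
As an aside, the "non-preemptible section" option mentioned in this docstring (in this file the text documents single_task_running()) can be satisfied by bracketing the check with preempt_disable()/preempt_enable(); the helper below is a made-up sketch, and its result remains only a hint because of the race described above.

#include <linux/preempt.h>
#include <linux/sched/stat.h>
#include <linux/types.h>

static bool this_cpu_otherwise_idle_hint(void)
{
	bool hint;

	preempt_disable();		/* make the check refer to this CPU */
	hint = single_task_running();	/* racy hint, as documented above */
	preempt_enable();

	return hint;
}
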
@@ -3416,7 +3429,7 @@ static void __sched notrace __schedule(bool preempt)
switch_count = &prev->nivcsw;
if (!preempt && prev->state) {
- if (unlikely(signal_pending_state(prev->state, prev))) {
+ if (signal_pending_state(prev->state, prev)) {
prev->state = TASK_RUNNING;
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
@@ -4191,7 +4204,7 @@ recheck:
* Treat SCHED_IDLE as nice 20. Only allow a switch to
* SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
*/
- if (idle_policy(p->policy) && !idle_policy(policy)) {
+ if (task_has_idle_policy(p) && !idle_policy(policy)) {
if (!can_nice(p, task_nice(p)))
return -EPERM;
}
@@ -4450,7 +4463,7 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
u32 size;
int ret;
- if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
+ if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0))
return -EFAULT;
/* Zero the full structure, so that a short copy will be nice: */
@@ -4650,7 +4663,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
{
int ret;
- if (!access_ok(VERIFY_WRITE, uattr, usize))
+ if (!access_ok(uattr, usize))
return -EFAULT;
/*
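
For reference, the two access_ok() hunks above reflect the interface losing its VERIFY_READ/VERIFY_WRITE argument, leaving only the user pointer and size. A minimal sketch of the new calling convention, using a made-up helper name, might look like:

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical helper, not part of this diff. */
static int demo_fetch_u32(const u32 __user *uptr, u32 *val)
{
	if (!access_ok(uptr, sizeof(*uptr)))	/* no VERIFY_* flag any more */
		return -EFAULT;

	return get_user(*val, uptr);		/* 0 on success, -EFAULT on fault */
}
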
@@ -5783,7 +5796,7 @@ int sched_cpu_deactivate(unsigned int cpu)
*
* Do sync before park smpboot threads to take care the rcu boost case.
*/
- synchronize_rcu_mult(call_rcu, call_rcu_sched);
+ synchronize_rcu();
#ifdef CONFIG_SCHED_SMT
/*