author    Thomas Gleixner <tglx@linutronix.de>  2020-05-11 22:54:30 +0200
committer Thomas Gleixner <tglx@linutronix.de>  2020-05-11 22:54:30 +0200
commit    68f0f2690e183306b52671a9ad09fb31808b0500 (patch)
tree      ed86c9a38d307f609ccaab0baac367df3e7e5e39 /include/linux/sched.h
parent    Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu (diff)
parent    Merge branches 'fixes.2020.04.27a', 'kfree_rcu.2020.04.27a', 'rcu-tasks.2020.04.27a', 'stall.2020.04.27a' and 'torture.2020.05.07a' into HEAD (diff)
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul McKenney:

1. Miscellaneous fixes.
2. kfree_rcu() updates.
3. Remove scheduler locking restriction.
4. RCU-tasks update, including addition of RCU Tasks Trace for BPF use and RCU Tasks Rude. (This branch is on top of #3 due to overlap of changed code.)
5. RCU CPU stall warning updates.
6. Torture-test updates.
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 53
1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04278493bf15..9437b53cc603 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -356,28 +356,30 @@ struct util_est {
} __attribute__((__aligned__(sizeof(u64))));
/*
- * The load_avg/util_avg accumulates an infinite geometric series
- * (see __update_load_avg() in kernel/sched/fair.c).
+ * The load/runnable/util_avg accumulates an infinite geometric series
+ * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
*
* [load_avg definition]
*
* load_avg = runnable% * scale_load_down(load)
*
- * where runnable% is the time ratio that a sched_entity is runnable.
- * For cfs_rq, it is the aggregated load_avg of all runnable and
- * blocked sched_entities.
+ * [runnable_avg definition]
+ *
+ * runnable_avg = runnable% * SCHED_CAPACITY_SCALE
*
* [util_avg definition]
*
* util_avg = running% * SCHED_CAPACITY_SCALE
*
- * where running% is the time ratio that a sched_entity is running on
- * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
- * and blocked sched_entities.
+ * where runnable% is the time ratio that a sched_entity is runnable and
+ * running% the time ratio that a sched_entity is running.
+ *
+ * For cfs_rq, they are the aggregated values of all runnable and blocked
+ * sched_entities.
*
- * load_avg and util_avg don't directly factor frequency scaling and CPU
- * capacity scaling. The scaling is done through the rq_clock_pelt that
- * is used for computing those signals (see update_rq_clock_pelt())
+ * The load/runnable/util_avg don't directly factor frequency scaling and CPU
+ * capacity scaling. The scaling is done through the rq_clock_pelt that is used
+ * for computing those signals (see update_rq_clock_pelt())
*
* N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
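The ratios above are stored in fixed point, scaled by SCHED_CAPACITY_SCALE (1024), and accumulated as a geometric series whose per-period decay factor y satisfies y^32 ~= 0.5. A minimal userspace sketch of that idea follows; it is not the kernel's __update_load_avg_cfs_rq(), and the whole-period update and the integer approximation of y are simplifications:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024

/*
 * One decay step: old average times y plus this period's contribution
 * times (1 - y), with y = 0.5^(1/32) ~= 0.97857 approximated as 32441/33154.
 */
static unsigned long pelt_step(unsigned long avg, unsigned int running_pct)
{
	unsigned long contrib = running_pct * SCHED_CAPACITY_SCALE / 100;

	return (avg * 32441 + contrib * (33154 - 32441)) / 33154;
}

int main(void)
{
	unsigned long util = 0;

	/* A task running 25% of the time converges toward 1024/4 = 256. */
	for (int i = 0; i < 200; i++)
		util = pelt_step(util, 25);

	printf("util_avg ~= %lu of %d\n", util, SCHED_CAPACITY_SCALE);
	return 0;
}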
@@ -401,11 +403,11 @@ struct util_est {
struct sched_avg {
u64 last_update_time;
u64 load_sum;
- u64 runnable_load_sum;
+ u64 runnable_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
- unsigned long runnable_load_avg;
+ unsigned long runnable_avg;
unsigned long util_avg;
struct util_est util_est;
} ____cacheline_aligned;
@@ -449,7 +451,6 @@ struct sched_statistics {
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
- unsigned long runnable_weight;
struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq;
@@ -470,6 +471,8 @@ struct sched_entity {
struct cfs_rq *cfs_rq;
/* rq "owned" by this entity/group: */
struct cfs_rq *my_q;
+ /* cached value of my_q->h_nr_running */
+ unsigned long runnable_weight;
#endif
#ifdef CONFIG_SMP
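The cached h_nr_running feeds the new runnable_avg: a task contributes 0 or 1 while a group entity contributes the number of runnable tasks below it. A sketch of the accessor this cache enables, modeled on kernel/sched/sched.h from the same series (shown out of context; entity_is_task() and the surrounding config guards are omitted):

static inline long se_runnable(struct sched_entity *se)
{
	if (entity_is_task(se))
		return !!se->on_rq;		/* a task is runnable or not */
	else
		return se->runnable_weight;	/* cached my_q->h_nr_running */
}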
@@ -610,7 +613,7 @@ union rcu_special {
u8 blocked;
u8 need_qs;
u8 exp_hint; /* Hint for performance. */
- u8 deferred_qs;
+ u8 need_mb; /* Readers need smp_mb(). */
} b; /* Bits. */
u32 s; /* Set of bits. */
};
@@ -721,6 +724,14 @@ struct task_struct {
struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
+#ifdef CONFIG_TASKS_TRACE_RCU
+ int trc_reader_nesting;
+ int trc_ipi_to_cpu;
+ union rcu_special trc_reader_special;
+ bool trc_reader_checked;
+ struct list_head trc_holdout_list;
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+
struct sched_info sched_info;
struct list_head tasks;
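The trc_* fields are the per-task reader state behind the new RCU Tasks Trace flavor called out in the merge message. A hedged sketch of the reader side, using the rcu_read_lock_trace()/rcu_read_unlock_trace() API from include/linux/rcupdate_trace.h; struct foo and gp are illustrative, not part of this patch:

#include <linux/rcupdate_trace.h>

struct foo { int val; };
static struct foo __rcu *gp;	/* hypothetical shared pointer */

static int read_val(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock_trace();	/* bumps current->trc_reader_nesting */
	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
	if (p)
		val = p->val;
	rcu_read_unlock_trace();	/* may report a deferred quiescent state */
	return val;
}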
@@ -782,9 +793,12 @@ struct task_struct {
unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
- /* to be used once the psi infrastructure lands upstream. */
unsigned use_memdelay:1;
#endif
+#ifdef CONFIG_PSI
+ /* Stalled due to lack of memory */
+ unsigned in_memstall:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
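The new in_memstall bit replaces the PF_MEMSTALL flag removed at the end of this diff. It is driven by the existing psi_memstall_enter()/psi_memstall_leave() helpers from include/linux/psi.h; a schematic use, where reclaim_work() is a hypothetical stand-in for memory-stalled work:

#include <linux/psi.h>

static void do_stalled_work(void)
{
	unsigned long pflags;

	psi_memstall_enter(&pflags);	/* sets current->in_memstall */
	reclaim_work();			/* hypothetical stalled work */
	psi_memstall_leave(&pflags);	/* restores the previous state */
}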
@@ -939,8 +953,8 @@ struct task_struct {
struct seccomp seccomp;
/* Thread group tracking: */
- u32 parent_exec_id;
- u32 self_exec_id;
+ u64 parent_exec_id;
+ u64 self_exec_id;
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
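Widening the exec ids from u32 to u64 hardens the parent/child exit-signal check: a 32-bit counter can be wrapped by roughly 2^32 execve() calls, making a stale parent_exec_id match again. Schematically, the comparison in do_notify_parent() (kernel/signal.c) looks like this:

	/*
	 * If the parent has exec'd since this id was recorded, do not
	 * deliver the potentially attacker-chosen exit_signal; fall
	 * back to the default SIGCHLD.
	 */
	if (tsk->parent_exec_id != READ_ONCE(tsk->real_parent->self_exec_id))
		sig = SIGCHLD;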
@@ -970,6 +984,7 @@ struct task_struct {
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
+ unsigned int hardirq_threaded;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
@@ -982,6 +997,7 @@ struct task_struct {
unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
+ int irq_config;
#endif
#ifdef CONFIG_LOCKDEP
@@ -1477,7 +1493,6 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */