path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  66
1 file changed, 62 insertions, 4 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e3a5eeec509..ef00bb22164c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -47,6 +47,7 @@ struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
+struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
@@ -65,7 +66,6 @@ struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
-struct io_uring_task;
/*
* Task state bitmask. NOTE! These bits are also
@@ -895,6 +895,9 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
+ /* PF_IO_WORKER */
+ void *pf_io_worker;
+
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -1871,11 +1874,32 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
*/
-#ifndef CONFIG_PREEMPTION
-extern int _cond_resched(void);
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+extern int __cond_resched(void);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+DECLARE_STATIC_CALL(cond_resched, __cond_resched);
+
+static __always_inline int _cond_resched(void)
+{
+ return static_call_mod(cond_resched)();
+}
+
#else
+
+static inline int _cond_resched(void)
+{
+ return __cond_resched();
+}
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+#else
+
static inline int _cond_resched(void) { return 0; }
-#endif
+
+#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
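
Illustrative only, not part of the patch: a typical caller of cond_resched() in a long-running loop. Under CONFIG_PREEMPT_DYNAMIC the call resolves through the cond_resched static call declared above; example_long_loop() and do_one_unit() are hypothetical names.

/*
 * Sketch: a long loop that voluntarily yields the CPU.  With
 * CONFIG_PREEMPT_DYNAMIC, cond_resched() goes through the static
 * call, so a fully preemptible kernel can later patch it out.
 */
static int example_long_loop(unsigned long nr)		/* hypothetical */
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		do_one_unit(i);		/* hypothetical per-iteration work */
		cond_resched();		/* returns non-zero if it rescheduled */
	}
	return 0;
}
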
@@ -1883,12 +1907,24 @@ static inline int _cond_resched(void) { return 0; }
})
extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
})
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_rwlock_read(lock); \
+})
+
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_rwlock_write(lock); \
+})
+
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
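
A hedged usage sketch of the new rwlock variants, not taken from the patch: the key point is that cond_resched_rwlock_read() may drop and re-take the lock, so the caller must tolerate briefly losing it. example_table, its fields and examine_slot() are made-up names.

/*
 * Sketch: scan a table under a read lock, yielding when the lock is
 * contended or a reschedule is due.  cond_resched_rwlock_read() may
 * drop and reacquire t->lock, so the loop must not depend on state
 * that can change while the lock is released.
 */
static void example_scan(struct example_table *t)	/* hypothetical */
{
	int i;

	read_lock(&t->lock);
	for (i = 0; i < t->nr_slots; i++) {
		examine_slot(t, i);			/* hypothetical */
		cond_resched_rwlock_read(&t->lock);
	}
	read_unlock(&t->lock);
}
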
@@ -1912,6 +1948,23 @@ static inline int spin_needbreak(spinlock_t *lock)
#endif
}
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
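
rwlock_needbreak() mirrors spin_needbreak(): it lets a lock holder notice waiters and back off early. A minimal sketch under hypothetical names (example_lock, more_work(), do_chunk()):

/* Sketch: leave a long write-side section early when someone is waiting. */
static void example_writer(void)
{
	write_lock(&example_lock);
	while (more_work()) {			/* hypothetical */
		do_chunk();			/* hypothetical */
		if (rwlock_needbreak(&example_lock) || need_resched())
			break;
	}
	write_unlock(&example_lock);
}
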
@@ -1968,6 +2021,11 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+#ifdef CONFIG_SMP
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu, unsigned long max);
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_RSEQ
/*
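
The new sched_cpu_util() helper reports a CPU's effective utilization scaled against the supplied 'max'. A hedged sketch of how a consumer might turn that into a percentage; arch_scale_cpu_capacity() is the usual source of a CPU's capacity, and example_cpu_load_pct() is a hypothetical helper, not something added by this patch.

/* Sketch only: express the effective utilization of @cpu as a percentage. */
static unsigned int example_cpu_load_pct(int cpu)
{
	unsigned long max = arch_scale_cpu_capacity(cpu);
	unsigned long util = sched_cpu_util(cpu, max);

	return (unsigned int)((util * 100) / max);
}
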