author    Thomas Gleixner <tglx@linutronix.de>  2021-03-14 16:34:35 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2021-03-14 16:34:35 +0100
commit    b470ebc9e0e57f53d1db9c49b8a3de4086babd05
tree      95c61291ad5f216967a9be36f19774026ffc88cb /include/linux/sched.h
parent    genirq: Prevent [devm_]irq_alloc_desc from returning irq 0
parent    irqchip/ingenic: Add support for the JZ4760
Merge tag 'irqchip-fixes-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent
Pull irqchip fixes from Marc Zyngier:

 - More compatible strings for the Ingenic irqchip (introducing the
   JZ4760B SoC)

 - Select GENERIC_IRQ_MULTI_HANDLER on the ARM ep93xx platform

 - Drop all GENERIC_IRQ_MULTI_HANDLER selections from the irqchip
   Kconfig, now relying on the architecture to get it right

 - Drop the debugfs_file field from struct irq_domain, now that debugfs
   can track things on its own
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	66
1 file changed, 62 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e3a5eeec509..ef00bb22164c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -47,6 +47,7 @@ struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
+struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
@@ -65,7 +66,6 @@ struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
-struct io_uring_task;
/*
* Task state bitmask. NOTE! These bits are also
@@ -895,6 +895,9 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
+ /* PF_IO_WORKER */
+ void *pf_io_worker;
+
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -1871,11 +1874,32 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
*/
-#ifndef CONFIG_PREEMPTION
-extern int _cond_resched(void);
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+extern int __cond_resched(void);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+DECLARE_STATIC_CALL(cond_resched, __cond_resched);
+
+static __always_inline int _cond_resched(void)
+{
+ return static_call_mod(cond_resched)();
+}
+
#else
+
+static inline int _cond_resched(void)
+{
+ return __cond_resched();
+}
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+#else
+
static inline int _cond_resched(void) { return 0; }
-#endif
+
+#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
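
With CONFIG_PREEMPT_DYNAMIC the preemption model is chosen at boot
(preempt=none/voluntary/full), so _cond_resched() above is routed through a
static call whose target the scheduler can repatch at runtime instead of
being fixed at compile time. A minimal sketch of that indirection, with an
ordinary function pointer standing in for the kernel's static_call
machinery; every name below is illustrative, not kernel API:

	/*
	 * Sketch only: the real static_call_update() patches the call
	 * site in place, so the fast path has no pointer load; a plain
	 * function pointer models the same switch with a small overhead.
	 */
	static int cond_resched_active(void)
	{
		/* ...yield the CPU if a reschedule is pending... */
		return 0;
	}

	static int cond_resched_noop(void)
	{
		return 0;	/* full preemption: explicit yields not needed */
	}

	static int (*cond_resched_impl)(void) = cond_resched_active;

	/* Boot-time switch, e.g. when "preempt=full" is parsed. */
	static void preempt_dynamic_full(void)
	{
		cond_resched_impl = cond_resched_noop;
	}
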
@@ -1883,12 +1907,24 @@ static inline int _cond_resched(void) { return 0; }
})
extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
})
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_rwlock_read(lock); \
+})
+
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_rwlock_write(lock); \
+})
+
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
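
The two rwlock helpers follow the cond_resched_lock() pattern above: drop
the lock when a reschedule is due, schedule, then retake it. A hedged
read-side usage sketch; scan_buckets(), inspect() and struct bucket are
hypothetical, only the locking calls are real:

	static void scan_buckets(rwlock_t *lock, struct bucket *tbl, int n)
	{
		int i;

		read_lock(lock);
		for (i = 0; i < n; i++) {
			inspect(&tbl[i]);	/* hypothetical per-bucket work */
			/*
			 * May drop and retake @lock, so carry only state
			 * (here, the index) that survives a release.
			 */
			cond_resched_rwlock_read(lock);
		}
		read_unlock(lock);
	}
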
@@ -1912,6 +1948,23 @@ static inline int spin_needbreak(spinlock_t *lock)
#endif
}
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically it does not depend on CONFIG_PREEMPTION, but it reflects
+ * a general need for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
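
rwlock_needbreak() is the rwlock counterpart of spin_needbreak() above: a
long-running lock holder polls it and yields only when another task is
actually waiting. A write-side sketch pairing it with
cond_resched_rwlock_write(); apart from those two helpers and
need_resched(), the names are hypothetical:

	static void update_buckets(rwlock_t *lock, struct bucket *tbl, int n)
	{
		int i;

		write_lock(lock);
		for (i = 0; i < n; i++) {
			update(&tbl[i]);	/* hypothetical write-side work */
			if (rwlock_needbreak(lock) || need_resched())
				cond_resched_rwlock_write(lock);
		}
		write_unlock(lock);
	}
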
@@ -1968,6 +2021,11 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+#ifdef CONFIG_SMP
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu, unsigned long max);
+#endif /* CONFIG_SMP */
+
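
sched_cpu_util() reports the CPU's effective utilization as seen by the
scheduler, rescaled against the caller-supplied max. A hedged consumer
sketch; cpu_load_pct() is hypothetical, and arch_scale_cpu_capacity() is
assumed to be the natural scale, as in the thermal/cpufreq code this
export targets:

	/* Hypothetical: utilization of @cpu as a percentage of capacity. */
	static unsigned long cpu_load_pct(int cpu)
	{
		unsigned long max = arch_scale_cpu_capacity(cpu);

		return sched_cpu_util(cpu, max) * 100 / max;
	}
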
#ifdef CONFIG_RSEQ
/*