 include/linux/interrupt.h | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0a4ce25c1464..3c8a29176258 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -675,10 +675,21 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
	while (test_bit(TASKLET_STATE_RUN, &t->state))
		cpu_relax();
}
+
+/*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &t->state))
+		cpu_relax();
+}
#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);
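The new tasklet_unlock_spin_wait() above only does real work on SMP builds; the empty stub after the #else is used otherwise. To make clear what the helper is actually waiting for, here is a rough, simplified sketch (not part of this patch, and heavily reduced from tasklet_action_common() in kernel/softirq.c) of how the softirq core sets and clears the TASKLET_STATE_RUN bit that both wait helpers poll:

/*
 * Simplified sketch for orientation only; the real code is
 * tasklet_action_common() in kernel/softirq.c and additionally handles
 * the SCHED bit, rescheduling of disabled tasklets and the
 * callback-style variant.
 */
static void run_one_tasklet_sketch(struct tasklet_struct *t)
{
	if (tasklet_trylock(t)) {		/* sets TASKLET_STATE_RUN */
		if (!atomic_read(&t->count))	/* not disabled */
			t->func(t->data);	/* legacy (func, data) form */
		tasklet_unlock(t);		/* clears TASKLET_STATE_RUN */
	}
}

In other words, spinning on TASKLET_STATE_RUN means waiting for a callback that may currently be executing on another CPU to return, which is why doing so from atomic context is flagged above as error prone.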
@@ -703,6 +714,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
	smp_mb__after_atomic();
}

+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+	tasklet_disable_nosync(t);
+	tasklet_unlock_spin_wait(t);
+	smp_mb();
+}
+
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
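For context, a hedged sketch of the kind of legacy call site tasklet_disable_in_atomic() is meant for; every name below (foo_dev, rx_tasklet, foo_stop_rx_locked()) is hypothetical and not part of this patch. The caller is in atomic context, so it must not sleep and instead accepts the busy-wait; per the comment above, new code should avoid this pattern and disable the tasklet from preemptible context with plain tasklet_disable():

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical legacy driver state, for illustration only. */
struct foo_dev {
	spinlock_t		lock;
	struct tasklet_struct	rx_tasklet;
};

/*
 * Called with fd->lock held and interrupts disabled, i.e. from atomic
 * context where sleeping is not allowed.  tasklet_disable_in_atomic()
 * busy-waits until a concurrently running callback has returned.
 */
static void foo_stop_rx_locked(struct foo_dev *fd)
{
	lockdep_assert_held(&fd->lock);
	tasklet_disable_in_atomic(&fd->rx_tasklet);
	/* later, from task context: tasklet_enable(&fd->rx_tasklet); */
}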