Diffstat (limited to 'include/linux/rcutiny.h')

 include/linux/rcutiny.h | 103 +++++++++++++++++++++++++++----------------
 1 file changed, 63 insertions(+), 40 deletions(-)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7c1ecdb356d8..f519cd680228 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -14,12 +14,43 @@
#include <asm/param.h> /* for HZ */
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
-static inline unsigned long get_state_synchronize_rcu(void)
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
+
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
{
- return 0;
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
+}
+
+unsigned long get_state_synchronize_rcu(void);
+
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
+unsigned long start_poll_synchronize_rcu(void);
+
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
+}
+
+bool poll_state_synchronize_rcu(unsigned long oldstate);
+
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
}
static inline void cond_synchronize_rcu(unsigned long oldstate)
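For orientation, the sketch below shows how a caller might exercise the full-sized polling interface added in the hunk above. It is a hedged illustration, not part of the patch: the surrounding function and the retry loop are invented, and only get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), poll_state_synchronize_rcu_full(), and same_state_synchronize_rcu_full() come from this header.

/*
 * Illustrative only: exercises the rcu_gp_oldstate polling API.
 * The function itself is hypothetical; the four *_full() calls
 * are the ones declared above.
 */
static void example_full_poll(void)
{
	struct rcu_gp_oldstate snap, again;

	/* Start a grace period (if needed) and snapshot its state. */
	start_poll_synchronize_rcu_full(&snap);

	/* Take a second snapshot for comparison. */
	get_state_synchronize_rcu_full(&again);
	if (same_state_synchronize_rcu_full(&snap, &again))
		; /* Equal cookies: no grace period completed in between. */

	/* Wait (politely) until a full grace period has elapsed. */
	while (!poll_state_synchronize_rcu_full(&snap))
		schedule_timeout_uninterruptible(1);
}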
@@ -27,32 +58,36 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
-extern void rcu_barrier(void);
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu(rgosp->rgos_norm);
+}
-static inline void synchronize_rcu_expedited(void)
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
- synchronize_rcu();
+ return start_poll_synchronize_rcu();
}
-/*
- * Add one more declaration of kvfree() here. It is
- * not so straight forward to just include <linux/mm.h>
- * where it is defined due to getting many compile
- * errors caused by that include.
- */
-extern void kvfree(const void *addr);
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
+}
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
- if (head) {
- call_rcu(head, func);
- return;
- }
+ cond_synchronize_rcu(oldstate);
+}
- // kvfree_rcu(one_arg) call.
- might_sleep();
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
+}
+
+extern void rcu_barrier(void);
+
+static inline void synchronize_rcu_expedited(void)
+{
synchronize_rcu();
- kvfree((void *) func);
}
void rcu_qs(void);
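Because Tiny RCU maps the expedited polling calls onto their normal counterparts (with only one CPU, every grace period is already as expedited as it gets), a caller can pair them as in this hedged sketch; the function name and the overlapped work are invented for illustration:

/*
 * Illustrative only: start an (expedited) grace period early and
 * pay for it later only if it has not yet completed.
 */
static void example_expedited_pair(void)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_rcu_expedited();

	/* ... useful work overlapping the grace period ... */

	/* Returns immediately if the grace period already ended. */
	cond_synchronize_rcu_expedited(cookie);
}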
@@ -68,26 +103,20 @@ static inline void rcu_softirq_qs(void)
rcu_tasks_qs(current, (preempt)); \
} while (0)
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+static inline int rcu_needs_cpu(void)
{
- *nextevt = KTIME_MAX;
return 0;
}
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+
/*
* Take advantage of the fact that there is only one CPU, which
* allows us to ignore virtualization-based context switches.
*/
-static inline void rcu_virt_note_context_switch(int cpu) { }
+static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
-static inline void rcu_idle_enter(void) { }
-static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
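The narrowed rcu_needs_cpu() above no longer reports a next-event time through *nextevt, so timer bookkeeping stays with the caller and RCU answers only the yes/no question. A hedged sketch of such a caller follows; the function and its policy are hypothetical, not the actual kernel/time code:

/*
 * Illustrative only: decide whether the idle tick may be stopped.
 * Tiny RCU's stub always answers 0, since a single CPU needs no
 * scheduling-clock help to find quiescence.
 */
static bool example_can_stop_idle_tick(void)
{
	if (rcu_needs_cpu())
		return false;	/* RCU still needs the tick. */
	return true;
}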
@@ -95,17 +124,11 @@ static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
-#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
-#else /* #ifndef CONFIG_SRCU */
-static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
-static inline void kfree_rcu_scheduler_running(void) { }
-static inline bool rcu_gp_might_be_stalled(void) { return false; }
+static inline void rcu_momentary_eqs(void) { }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
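rcu_momentary_eqs(), the renamed rcu_momentary_dyntick_idle() seen in the hunk above, is a no-op here because a single CPU is quiescent whenever it is outside a read-side critical section; on Tree RCU the same call reports a momentary extended quiescent state. A hedged sketch of the usage pattern, with a hypothetical worker thread:

/*
 * Illustrative only: a CPU-bound kthread that periodically helps
 * grace periods along.  On Tiny RCU the rcu_momentary_eqs() call
 * compiles to an empty function.
 */
static int example_worker(void *unused)
{
	while (!kthread_should_stop()) {
		/* ... one batch of CPU-bound work ... */
		rcu_momentary_eqs();
		cond_resched();
	}
	return 0;
}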
@@ -116,6 +139,6 @@ static inline void rcu_all_qs(void) { barrier(); }
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
-static inline void rcu_cpu_starting(unsigned int cpu) { }
+static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_RCUTINY_H */