From 8ecca39483ed4e4e97096d0d6f8e25fdd323b189 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 29 Apr 2020 17:04:41 +0200
Subject: rbtree, sched/deadline: Use rb_add_cached()

Reduce rbtree boiler plate by using the new helpers.

Make rb_add_cached() / rb_erase_cached() return a pointer to the
leftmost node to aid in updating additional state.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Acked-by: Davidlohr Bueso
---
 include/linux/rbtree.h | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

(limited to 'include/linux/rbtree.h')

diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index e0b300de8f3f..d31ecaf4fdd3 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -141,12 +141,18 @@ static inline void rb_insert_color_cached(struct rb_node *node,
 	rb_insert_color(node, &root->rb_root);
 }
 
-static inline void rb_erase_cached(struct rb_node *node,
-				   struct rb_root_cached *root)
+
+static inline struct rb_node *
+rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
 {
+	struct rb_node *leftmost = NULL;
+
 	if (root->rb_leftmost == node)
-		root->rb_leftmost = rb_next(node);
+		leftmost = root->rb_leftmost = rb_next(node);
+
 	rb_erase(node, &root->rb_root);
+
+	return leftmost;
 }
 
 static inline void rb_replace_node_cached(struct rb_node *victim,
@@ -179,8 +185,10 @@ static inline void rb_replace_node_cached(struct rb_node *victim,
  * @node: node to insert
  * @tree: leftmost cached tree to insert @node into
  * @less: operator defining the (partial) node order
+ *
+ * Returns @node when it is the new leftmost, or NULL.
  */
-static __always_inline void
+static __always_inline struct rb_node *
 rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
 	      bool (*less)(struct rb_node *, const struct rb_node *))
 {
@@ -200,6 +208,8 @@ rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
 
 	rb_link_node(node, parent, link);
 	rb_insert_color_cached(node, tree, leftmost);
+
+	return leftmost ? node : NULL;
 }
 
 /**
-- 
cgit v1.2.3-59-g8ed1b
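
For reference (not part of the patch), below is a minimal sketch of how a caller
could consume the new return values to keep extra state in sync with the leftmost
node. The names demo_item, demo_less, demo_enqueue, demo_dequeue and demo_earliest
are invented for illustration and only assume the rbtree API shown above:

	#include <linux/rbtree.h>
	#include <linux/types.h>

	/* Hypothetical payload keyed by 'deadline'; not part of the patch. */
	struct demo_item {
		struct rb_node node;
		u64 deadline;
	};

	static inline bool demo_less(struct rb_node *a, const struct rb_node *b)
	{
		return rb_entry(a, struct demo_item, node)->deadline <
		       rb_entry(b, struct demo_item, node)->deadline;
	}

	/* Extra state mirroring the leftmost (earliest) element. */
	static u64 demo_earliest;

	static void demo_enqueue(struct rb_root_cached *root, struct demo_item *item)
	{
		/* rb_add_cached() returns @node only when it became the new leftmost. */
		if (rb_add_cached(&item->node, root, demo_less))
			demo_earliest = item->deadline;
	}

	static void demo_dequeue(struct rb_root_cached *root, struct demo_item *item)
	{
		struct rb_node *leftmost = rb_erase_cached(&item->node, root);

		/*
		 * Non-NULL means the old leftmost was erased and this is its
		 * successor; NULL means either the leftmost did not change or
		 * the tree is now empty.
		 */
		if (leftmost)
			demo_earliest = rb_entry(leftmost, struct demo_item,
						 node)->deadline;
	}

The sched/deadline changes in this commit (not shown in this view, which is limited
to include/linux/rbtree.h) are the intended consumer of these return values; the
sketch above only illustrates the general pattern of updating cached state when the
leftmost node changes.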