Diffstat (limited to 'include/linux/list_lru.h')
 include/linux/list_lru.h | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 1a548b0b7578..f4d4cb608c02 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -8,6 +8,7 @@
#define _LRU_LIST_H
#include <linux/list.h>
+#include <linux/nodemask.h>
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
@@ -18,11 +19,26 @@ enum lru_status {
internally, but has to return locked. */
};
-struct list_lru {
+struct list_lru_node {
spinlock_t lock;
struct list_head list;
/* kept as signed so we can catch imbalance bugs */
long nr_items;
+} ____cacheline_aligned_in_smp;
+
+struct list_lru {
+ /*
+ * Because we use a fixed-size array, this struct can be very big if
+ * MAX_NUMNODES is big. If this becomes a problem, it is fixable by
+ * turning this into a pointer and dynamically allocating
+ * nr_node_ids entries. That quantity is firmware-provided, and would still
+ * provide room for all nodes, at the cost of a pointer lookup and an
+ * extra allocation. Because that allocation will most likely come from
+ * a different slab cache than the main structure holding this
+ * structure, we may very well fail.
+ */
+ struct list_lru_node node[MAX_NUMNODES];
+ nodemask_t active_nodes;
};
int list_lru_init(struct list_lru *lru);
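
The struct split above is what makes per-node operation possible: each node gets its own lock, list and counter, and active_nodes records which nodes currently hold items so that counting and walking can skip the empty ones. As a rough illustration only (the helper name below is hypothetical and this is not the code the patch adds; the real list_lru_add() lives in the list_lru implementation file, which this header-only diff does not show), an add path could pick the node from the object's backing page and mark that node active when its first item arrives:

#include <linux/list_lru.h>
#include <linux/mm.h>		/* virt_to_page(), page_to_nid() */
#include <linux/spinlock.h>

/* Sketch only: derive the node from the item's backing page and insert
 * onto that node's list under the per-node lock. */
static bool sketch_list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			/* first item on this node: mark the node active */
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}

The ____cacheline_aligned_in_smp on list_lru_node keeps each node's lock and counter on its own cache line, so CPUs adding to different nodes do not bounce the same line between them.
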
@@ -66,10 +82,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* guarantee that the list is not updated while the count is being computed.
* Callers that want such a guarantee need to provide an outer lock.
*/
-static inline unsigned long list_lru_count(struct list_lru *lru)
-{
- return lru->nr_items;
-}
+unsigned long list_lru_count(struct list_lru *lru);
typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
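
Turning the inline list_lru_count() into an out-of-line declaration follows from the same split: there is no longer a single lru->nr_items to read, so the count has to be accumulated across nodes. A sketch of what that summation might look like, under the same caveat as the comment above the declaration (without an outer lock the result is only a snapshot); the name sketch_list_lru_count is hypothetical, and the real definition moves into the list_lru implementation file, outside this header-only diff:

#include <linux/list_lru.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>

/* Sketch only: visit the nodes that currently hold items, via the
 * active_nodes mask, and add up their per-node counters. */
static unsigned long sketch_list_lru_count(struct list_lru *lru)
{
	unsigned long count = 0;
	int nid;

	for_each_node_mask(nid, lru->active_nodes) {
		struct list_lru_node *nlru = &lru->node[nid];

		spin_lock(&nlru->lock);
		count += nlru->nr_items;
		spin_unlock(&nlru->lock);
	}
	return count;
}
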