Diffstat (limited to 'arch/x86/mm/pat_rbtree.c')
-rw-r--r--   arch/x86/mm/pat_rbtree.c   273
1 file changed, 273 insertions, 0 deletions
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
new file mode 100644
index 000000000000..07de4cb8cc30
--- /dev/null
+++ b/arch/x86/mm/pat_rbtree.c
@@ -0,0 +1,273 @@
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *          Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Interval tree (augmented rbtree) used to store the PAT memory type
+ * reservations.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/pat.h>
+
+#include "pat_internal.h"
+
+/*
+ * The memtype tree keeps track of memory type for specific
+ * physical memory areas. Without proper tracking, conflicting memory
+ * types in different mappings can cause CPU cache corruption.
+ *
+ * The tree is an interval tree (augmented rbtree) with tree ordered
+ * on starting address. Tree can contain multiple entries for
+ * different regions which overlap. All the aliases have the same
+ * cache attributes of course.
+ *
+ * memtype_lock protects the rbtree.
+ */
+
+static void memtype_rb_augment_cb(struct rb_node *node);
+static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+
+static int is_node_overlap(struct memtype *node, u64 start, u64 end)
+{
+	if (node->start >= end || node->end <= start)
+		return 0;
+
+	return 1;
+}
+
+static u64 get_subtree_max_end(struct rb_node *node)
+{
+	u64 ret = 0;
+	if (node) {
+		struct memtype *data = container_of(node, struct memtype, rb);
+		ret = data->subtree_max_end;
+	}
+	return ret;
+}
+
+/* Update 'subtree_max_end' for a node, based on node and its children */
+static void update_node_max_end(struct rb_node *node)
+{
+	struct memtype *data;
+	u64 max_end, child_max_end;
+
+	if (!node)
+		return;
+
+	data = container_of(node, struct memtype, rb);
+	max_end = data->end;
+
+	child_max_end = get_subtree_max_end(node->rb_right);
+	if (child_max_end > max_end)
+		max_end = child_max_end;
+
+	child_max_end = get_subtree_max_end(node->rb_left);
+	if (child_max_end > max_end)
+		max_end = child_max_end;
+
+	data->subtree_max_end = max_end;
+}
+
+/* Update 'subtree_max_end' for a node and all its ancestors */
+static void update_path_max_end(struct rb_node *node)
+{
+	u64 old_max_end, new_max_end;
+
+	while (node) {
+		struct memtype *data = container_of(node, struct memtype, rb);
+
+		old_max_end = data->subtree_max_end;
+		update_node_max_end(node);
+		new_max_end = data->subtree_max_end;
+
+		if (new_max_end == old_max_end)
+			break;
+
+		node = rb_parent(node);
+	}
+}
+
+/* Find the first (lowest start addr) overlapping range from rb tree */
+static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
+				u64 start, u64 end)
+{
+	struct rb_node *node = root->rb_node;
+	struct memtype *last_lower = NULL;
+
+	while (node) {
+		struct memtype *data = container_of(node, struct memtype, rb);
+
+		if (get_subtree_max_end(node->rb_left) > start) {
+			/* Lowest overlap if any must be on left side */
+			node = node->rb_left;
+		} else if (is_node_overlap(data, start, end)) {
+			last_lower = data;
+			break;
+		} else if (start >= data->start) {
+			/* Lowest overlap if any must be on right side */
+			node = node->rb_right;
+		} else {
+			break;
+		}
+	}
+	return last_lower; /* Returns NULL if there is no overlap */
+}
+
+static struct memtype *memtype_rb_exact_match(struct rb_root *root,
+				u64 start, u64 end)
+{
+	struct memtype *match;
+
+	match = memtype_rb_lowest_match(root, start, end);
+	while (match != NULL && match->start < end) {
+		struct rb_node *node;
+
+		if (match->start == start && match->end == end)
+			return match;
+
+		node = rb_next(&match->rb);
+		if (node)
+			match = container_of(node, struct memtype, rb);
+		else
+			match = NULL;
+	}
+
+	return NULL; /* Returns NULL if there is no exact match */
+}
+
+static int memtype_rb_check_conflict(struct rb_root *root,
+				u64 start, u64 end,
+				unsigned long reqtype, unsigned long *newtype)
+{
+	struct rb_node *node;
+	struct memtype *match;
+	int found_type = reqtype;
+
+	match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
+	if (match == NULL)
+		goto success;
+
+	if (match->type != found_type && newtype == NULL)
+		goto failure;
+
+	dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
+	found_type = match->type;
+
+	node = rb_next(&match->rb);
+	while (node) {
+		match = container_of(node, struct memtype, rb);
+
+		if (match->start >= end) /* Checked all possible matches */
+			goto success;
+
+		if (is_node_overlap(match, start, end) &&
+		    match->type != found_type) {
+			goto failure;
+		}
+
+		node = rb_next(&match->rb);
+	}
+success:
+	if (newtype)
+		*newtype = found_type;
+
+	return 0;
+
+failure:
+	printk(KERN_INFO "%s:%d conflicting memory types "
+		"%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
+		end, cattr_name(found_type), cattr_name(match->type));
+	return -EBUSY;
+}
+
+static void memtype_rb_augment_cb(struct rb_node *node)
+{
+	if (node)
+		update_path_max_end(node);
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
+{
+	struct rb_node **node = &(root->rb_node);
+	struct rb_node *parent = NULL;
+
+	while (*node) {
+		struct memtype *data = container_of(*node, struct memtype, rb);
+
+		parent = *node;
+		if (newdata->start <= data->start)
+			node = &((*node)->rb_left);
+		else if (newdata->start > data->start)
+			node = &((*node)->rb_right);
+	}
+
+	rb_link_node(&newdata->rb, parent, node);
+	rb_insert_color(&newdata->rb, root);
+}
+
+int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
+{
+	int err = 0;
+
+	err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
+						new->type, ret_type);
+
+	if (!err) {
+		if (ret_type)
+			new->type = *ret_type;
+
+		memtype_rb_insert(&memtype_rbroot, new);
+	}
+	return err;
+}
+
+int rbt_memtype_erase(u64 start, u64 end)
+{
+	struct memtype *data;
+
+	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
+	if (!data)
+		return -EINVAL;
+
+	rb_erase(&data->rb, &memtype_rbroot);
+	return 0;
+}
+
+struct memtype *rbt_memtype_lookup(u64 addr)
+{
+	struct memtype *data;
+	data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
+	return data;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{
+	struct rb_node *node;
+	int i = 1;
+
+	node = rb_first(&memtype_rbroot);
+	while (node && pos != i) {
+		node = rb_next(node);
+		i++;
+	}
+
+	if (node) { /* pos == i */
+		struct memtype *this = container_of(node, struct memtype, rb);
+		*out = *this;
+		return 0;
+	} else {
+		return 1;
+	}
+}
+#endif
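
The core of the patch is the interval search in memtype_rb_lowest_match(): each node
caches subtree_max_end, so the walk can prove that an entire subtree cannot contain an
overlap and prune it. Below is a minimal, self-contained userspace sketch of the same
idea. It uses a plain unbalanced BST rather than the kernel's augmented rbtree, and
every name in it (struct range_node, range_insert, lowest_overlap) is hypothetical,
not part of this patch.

#include <stdio.h>
#include <stdlib.h>

struct range_node {
	unsigned long long start, end;	    /* half-open interval [start, end) */
	unsigned long long subtree_max_end; /* max 'end' anywhere in this subtree */
	struct range_node *left, *right;
};

static unsigned long long subtree_max(struct range_node *n)
{
	return n ? n->subtree_max_end : 0;
}

/* Insert ordered on start; refresh subtree_max_end on the way back up. */
static struct range_node *range_insert(struct range_node *root,
				       unsigned long long start,
				       unsigned long long end)
{
	if (!root) {
		struct range_node *n = calloc(1, sizeof(*n));
		n->start = start;
		n->end = end;
		n->subtree_max_end = end;
		return n;
	}
	if (start <= root->start)
		root->left = range_insert(root->left, start, end);
	else
		root->right = range_insert(root->right, start, end);
	if (root->subtree_max_end < end)
		root->subtree_max_end = end;
	return root;
}

/* Same walk as memtype_rb_lowest_match(): go left while the cached max
 * end proves the left subtree may still hold an overlap; otherwise test
 * the current node, then fall through to the right subtree. */
static struct range_node *lowest_overlap(struct range_node *node,
					 unsigned long long start,
					 unsigned long long end)
{
	while (node) {
		if (subtree_max(node->left) > start)
			node = node->left;
		else if (node->start < end && node->end > start)
			return node;
		else if (start >= node->start)
			node = node->right;
		else
			break;
	}
	return NULL;
}

int main(void)
{
	struct range_node *root = NULL;
	struct range_node *hit;

	root = range_insert(root, 0x1000, 0x2000);
	root = range_insert(root, 0x3000, 0x5000);
	root = range_insert(root, 0x4000, 0x6000);

	hit = lowest_overlap(root, 0x3800, 0x4800);
	if (hit)	/* prints 0x3000-0x5000, the lowest-start overlap */
		printf("lowest overlap: 0x%llx-0x%llx\n", hit->start, hit->end);
	return 0;
}

Because subtree_max_end bounds every interval below a node, the query visits only
one root-to-node path, giving O(log n) lookups on a balanced tree instead of a
linear scan over all reservations.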

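For context, the file's header comment says memtype_lock protects the rbtree, so the
new rbt_* entry points are meant to be called with that lock held. The following is a
hedged sketch of a caller, loosely modeled on pat.c's reserve_memtype(); the wrapper
reserve_range() is hypothetical and the real caller does considerably more (RAM
tracking, argument validation).

static int reserve_range(u64 start, u64 end, unsigned long req_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int err;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = req_type;

	spin_lock(&memtype_lock);
	/*
	 * Fails with -EBUSY if an overlapping range was reserved with a
	 * different type; on success the node is linked into the tree,
	 * which owns it until rbt_memtype_erase(start, end).
	 */
	err = rbt_memtype_check_insert(new, &actual_type);
	spin_unlock(&memtype_lock);

	if (err)
		kfree(new);
	return err;
}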