/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Interval tree (augmented rbtree) used to store the PAT memory type
 * reservations.
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/pat.h>

#include "pat_internal.h"

/*
 * The memtype tree keeps track of memory type for specific
 * physical memory areas. Without proper tracking, conflicting memory
 * types in different mappings can cause CPU cache corruption.
 *
 * The tree is an interval tree (augmented rbtree) ordered by starting
 * address. It can contain multiple entries for different regions that
 * overlap; all aliases of a region must have the same cache
 * attributes.
 *
 * memtype_lock protects the rbtree.
 */
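
/*
 * For reference, a minimal sketch of the 'struct memtype' fields this
 * file relies on. The real definition lives in pat_internal.h and may
 * carry additional members:
 *
 *	struct memtype {
 *		u64		start;		   first byte of the range
 *		u64		end;		   one past the last byte
 *		unsigned long	type;		   requested cache attribute
 *		struct rb_node	rb;		   rbtree linkage
 *		u64		subtree_max_end;   max 'end' in this subtree
 *	};
 *
 * subtree_max_end is the augmentation that turns the plain rbtree
 * into an interval tree: it lets searches skip whole subtrees that
 * cannot possibly contain an overlap.
 */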

static struct rb_root memtype_rbroot = RB_ROOT;

static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
	if (node->start >= end || node->end <= start)
		return 0;

	return 1;
}
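
/*
 * Note that ranges are half-open, [start, end): for example,
 * [0x1000, 0x2000) and [0x2000, 0x3000) are adjacent, not
 * overlapping.
 */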

static u64 get_subtree_max_end(struct rb_node *node)
{
	u64 ret = 0;

	if (node) {
		struct memtype *data = container_of(node, struct memtype, rb);
		ret = data->subtree_max_end;
	}
	return ret;
}

static u64 compute_subtree_max_end(struct memtype *data)
{
	u64 max_end = data->end, child_max_end;

	child_max_end = get_subtree_max_end(data->rb.rb_right);
	if (child_max_end > max_end)
		max_end = child_max_end;

	child_max_end = get_subtree_max_end(data->rb.rb_left);
	if (child_max_end > max_end)
		max_end = child_max_end;

	return max_end;
}

/* Update 'subtree_max_end' for node and its parents */
static void memtype_rb_propagate_cb(struct rb_node *node, struct rb_node *stop)
{
	while (node != stop) {
		struct memtype *data = container_of(node, struct memtype, rb);
		u64 subtree_max_end = compute_subtree_max_end(data);
		if (data->subtree_max_end == subtree_max_end)
			break;
		data->subtree_max_end = subtree_max_end;
		node = rb_parent(&data->rb);
	}
}

static void memtype_rb_copy_cb(struct rb_node *old, struct rb_node *new)
{
	struct memtype *old_data = container_of(old, struct memtype, rb);
	struct memtype *new_data = container_of(new, struct memtype, rb);

	new_data->subtree_max_end = old_data->subtree_max_end;
}

/*
 * Update 'subtree_max_end' after tree rotation. old and new are the
 * former and current subtree roots.
 */
static void memtype_rb_rotate_cb(struct rb_node *old, struct rb_node *new)
{
	struct memtype *old_data = container_of(old, struct memtype, rb);
	struct memtype *new_data = container_of(new, struct memtype, rb);

	new_data->subtree_max_end = old_data->subtree_max_end;
	old_data->subtree_max_end = compute_subtree_max_end(old_data);
}

static const struct rb_augment_callbacks memtype_rb_augment_cb = {
	memtype_rb_propagate_cb, memtype_rb_copy_cb, memtype_rb_rotate_cb
};
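
/*
 * Worked example (hypothetical tree): if a node covering
 * [0x3000, 0x5000) has a left child covering [0x1000, 0x2000) and a
 * right child covering [0x4000, 0x6000), compute_subtree_max_end()
 * for that node yields max(0x5000, 0x2000, 0x6000) = 0x6000. The
 * callbacks above keep this invariant intact while
 * rb_insert_augmented() and rb_erase_augmented() rebalance the tree.
 */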

/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
				u64 start, u64 end)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (get_subtree_max_end(node->rb_left) > start) {
			/* Lowest overlap if any must be on left side */
			node = node->rb_left;
		} else if (is_node_overlap(data, start, end)) {
			last_lower = data;
			break;
		} else if (start >= data->start) {
			/* Lowest overlap if any must be on right side */
			node = node->rb_right;
		} else {
			break;
		}
	}
	return last_lower; /* Returns NULL if there is no overlap */
}
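
/*
 * Query example (hypothetical contents): with [0x1000, 0x2000),
 * [0x3000, 0x5000) and [0x4000, 0x6000) in the tree, a search for
 * [0x4800, 0x4c00) descends left only while the left subtree's
 * subtree_max_end exceeds 0x4800, and returns the [0x3000, 0x5000)
 * entry - the overlapping range with the lowest start address.
 */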

static struct memtype *memtype_rb_exact_match(struct rb_root *root,
				u64 start, u64 end)
{
	struct memtype *match;

	match = memtype_rb_lowest_match(root, start, end);
	while (match != NULL && match->start < end) {
		struct rb_node *node;

		if (match->start == start && match->end == end)
			return match;

		node = rb_next(&match->rb);
		if (node)
			match = container_of(node, struct memtype, rb);
		else
			match = NULL;
	}

	return NULL; /* Returns NULL if there is no exact match */
}

static int memtype_rb_check_conflict(struct rb_root *root,
				u64 start, u64 end,
				unsigned long reqtype, unsigned long *newtype)
{
	struct rb_node *node;
	struct memtype *match;
	int found_type = reqtype;

	match = memtype_rb_lowest_match(root, start, end);
	if (match == NULL)
		goto success;

	if (match->type != found_type && newtype == NULL)
		goto failure;

	dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
	found_type = match->type;

	node = rb_next(&match->rb);
	while (node) {
		match = container_of(node, struct memtype, rb);

		if (match->start >= end) /* Checked all possible matches */
			goto success;

		if (is_node_overlap(match, start, end) &&
		    match->type != found_type) {
			goto failure;
		}

		node = rb_next(&match->rb);
	}
success:
	if (newtype)
		*newtype = found_type;

	return 0;

failure:
	pr_info("%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
		current->comm, current->pid, start, end,
		cattr_name(found_type), cattr_name(match->type));
	return -EBUSY;
}
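
/*
 * Example of the semantics above: if the requested range overlaps an
 * existing uncached reservation, a write-combining request fails with
 * -EBUSY when newtype is NULL; with a non-NULL newtype the existing
 * type wins and is reported back to the caller, provided every
 * overlapping entry agrees on that type.
 */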

static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
	struct rb_node **node = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*node) {
		struct memtype *data = container_of(*node, struct memtype, rb);

		parent = *node;
		if (data->subtree_max_end < newdata->end)
			data->subtree_max_end = newdata->end;

		if (newdata->start <= data->start)
			node = &((*node)->rb_left);
		else
			node = &((*node)->rb_right);
	}

	newdata->subtree_max_end = newdata->end;
	rb_link_node(&newdata->rb, parent, node);
	rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
}

int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
{
	int err = 0;

	err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
						new->type, ret_type);

	if (!err) {
		if (ret_type)
			new->type = *ret_type;

		new->subtree_max_end = new->end;
		memtype_rb_insert(&memtype_rbroot, new);
	}
	return err;
}
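
/*
 * Usage sketch (illustrative; 'new', 'err' and 'actual_type' are the
 * caller's locals): the caller in pat.c allocates the node, fills in
 * the range and requested type, and calls this under memtype_lock:
 *
 *	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
 *	new->start = start;
 *	new->end   = end;
 *	new->type  = req_type;
 *
 *	spin_lock(&memtype_lock);
 *	err = rbt_memtype_check_insert(new, &actual_type);
 *	spin_unlock(&memtype_lock);
 *
 * On success, new->type holds the type that was actually reserved,
 * which may have been adopted from an overlapping entry.
 */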

struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
	struct memtype *data;

	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
	if (!data)
		goto out;

	rb_erase_augmented(&data->rb, &memtype_rbroot, &memtype_rb_augment_cb);
out:
	return data;
}
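
/*
 * Removal sketch (illustrative; 'entry' is the caller's local): erase
 * under memtype_lock and free the node afterwards:
 *
 *	spin_lock(&memtype_lock);
 *	entry = rbt_memtype_erase(start, end);
 *	spin_unlock(&memtype_lock);
 *	kfree(entry);
 *
 * A NULL return means no reservation matched [start, end) exactly.
 */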

struct memtype *rbt_memtype_lookup(u64 addr)
{
	struct memtype *data;

	data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
	return data;
}
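
/*
 * Lookup sketch (illustrative; 'entry' and 'rettype' are the caller's
 * locals, with rettype preset to the caller's default): the query
 * interval is a single page, [addr, addr + PAGE_SIZE):
 *
 *	spin_lock(&memtype_lock);
 *	entry = rbt_memtype_lookup(paddr);
 *	if (entry)
 *		rettype = entry->type;
 *	spin_unlock(&memtype_lock);
 */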

#if defined(CONFIG_DEBUG_FS)
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
	struct rb_node *node;
	int i = 1;

	node = rb_first(&memtype_rbroot);
	while (node && pos != i) {
		node = rb_next(node);
		i++;
	}

	if (node) { /* pos == i */
		struct memtype *this = container_of(node, struct memtype, rb);
		*out = *this;
		return 0;
	} else {
		return 1;
	}
}
#endif