/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
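
/*
 * The cache is held as a set of blocks of contiguous registers.  Each
 * block lives in a regcache_rbtree_node keyed by its base register, and
 * the nodes are kept in an rbtree so the block covering a register can
 * be found in O(log n).
 */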

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

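/*
 * Find the block covering @reg.  The most recently used node is checked
 * first, since register accesses tend to cluster, before falling back to
 * a binary search of the tree.
 */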
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							   unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

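/*
 * Link a new node into the rbtree, keyed by base register.  Returns 0
 * without inserting if an existing block already covers the node's base
 * register, 1 on successful insertion.
 */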
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

#ifdef CONFIG_DEBUG_FS
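/*
 * Dump one line per block ("base-top (count)") plus totals and memory
 * usage to the "rbtree" debugfs file.
 */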
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

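/*
 * Allocate the rbtree context and seed the cache with the register
 * defaults supplied by the driver.
 */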
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	rbtree_debugfs_init(map);

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

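/*
 * Read a register value from the cache.  Returns -ENOENT if no block
 * covers the register or it has never been cached.
 */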
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

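/*
 * Grow an existing block so that it spans [base_reg, top_reg] and store
 * the new value in it.  Both the data block and the presence bitmap are
 * reallocated; when the block grows downwards the old contents are
 * shifted up to make room at the front.
 */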
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* Point the node at the new block straight away: krealloc() may
	 * have freed the old buffer, so the node must not be left holding
	 * a stale pointer if the bitmap allocation below fails.
	 */
	rbnode->block = blk;

	present = krealloc(rbnode->cache_present,
			   BITS_TO_LONGS(blklen) * sizeof(*present),
			   GFP_KERNEL);
	if (!present)
		return -ENOMEM;

	/* krealloc() does not zero any longs it adds, so clear the grown
	 * part of the bitmap to avoid registers being falsely marked
	 * present.
	 */
	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen))
		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		/* shift towards higher bit positions, mirroring the memmove */
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

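/*
 * Allocate a new block for @reg.  If a readable range table is available
 * it is used to size the block to cover the whole range containing @reg;
 * otherwise the block starts out holding just the one register.
 */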
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
				GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
		sizeof(*rbnode->cache_present), GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

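/*
 * Update the cached value for @reg, either in the block that already
 * covers it, by growing a nearby block, or by allocating a new node.
 */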
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;

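		/*
		 * Blocks within max_dist registers of @reg are worth
		 * coalescing with: padding the gap then costs no more
		 * memory than the rbnode a separate block would need.
		 */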
		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
				&base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				new_base_reg = min(reg, base_reg);
				new_top_reg = max(reg, top_reg);
			} else {
				continue;
			}

			ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode_tmp;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

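/*
 * Write back all cached registers in [min, max] that are marked present,
 * one block at a time.
 */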
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
			&top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

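/*
 * Forget the cached values in [min, max] by clearing the corresponding
 * presence bits; the block storage itself is kept for reuse.
 */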
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
			&top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

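/* Callbacks hooked up to the regcache core for REGCACHE_RBTREE maps */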
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};