/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers.  So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
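
/*
 * Typical usage (an illustrative sketch, not part of the kernel-doc
 * below; "vaddr"/"paddr" and the sizes are hypothetical):
 *
 *	struct gen_pool *pool;
 *	unsigned long addr;
 *
 *	pool = gen_pool_create(5, -1);	// 1 bit tracks 2^5 = 32 bytes
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, vaddr, paddr, 4096, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	addr = gen_pool_alloc(pool, 256);	// returns 0 on failure
 *	if (addr)
 *		gen_pool_free(pool, addr, 256);
 *	gen_pool_destroy(pool);
 */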

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

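/*
 * Lock-free update of one bitmap word: re-read the word and retry the
 * cmpxchg() until it succeeds, or bail out with -EBUSY as soon as
 * another user is seen to have already set (or cleared) one of the
 * requested bits.  Callers must be prepared to retry or roll back.
 */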
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several
 * users can set/clear the same bitmap simultaneously without a lock.
 * If two users try to set the same bit, one of them gets back the
 * number of bits that remain to be set; otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
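
/*
 * Worked example, assuming 64-bit longs: bitmap_set_ll(map, 60, 10)
 * first sets bits 60..63 of word 0 via BITMAP_FIRST_WORD_MASK(60),
 * then sets the remaining 6 bits (bitmap bits 64..69, i.e. bits 0..5
 * of word 1) via BITMAP_LAST_WORD_MASK(70).
 */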

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several
 * users can set/clear the same bitmap simultaneously without a lock.
 * If two users try to clear the same bit, one of them gets back the
 * number of bits that remain to be cleared; otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
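			/*
			 * A concurrent allocation raced us and took some
			 * of the bits we wanted: roll back the bits we
			 * did set and retry the search in this chunk.
			 */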
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
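
/*
 * Example use (a sketch: "pool" is assumed to already contain
 * DMA-capable memory, and the size is arbitrary):
 *
 *	dma_addr_t dma;
 *	void *va = gen_pool_dma_alloc(pool, 128, &dma);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	// program "dma" into the device, access the buffer through "va"
 */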

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
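	/* @addr did not fall inside any chunk of this pool: caller bug. */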
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:	the generic memory pool
 * @func:	func to call
 * @data:	additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
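
/*
 * Example (a sketch: "total_size_cb" is a hypothetical callback that
 * accumulates the size of every chunk through @data):
 *
 *	static void total_size_cb(struct gen_pool *pool,
 *				  struct gen_pool_chunk *chunk, void *data)
 *	{
 *		*(size_t *)data += chunk->end_addr - chunk->start_addr + 1;
 *	}
 *
 *	size_t total = 0;
 *	gen_pool_for_each_chunk(pool, total_size_cb, &total);
 */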

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool:	the generic memory pool
 * @start:	start address
 * @size:	size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
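
/*
 * For example, to switch a pool from the default first-fit policy to
 * the best-fit search implemented below:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */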

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
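
/*
 * For example, a request for nr = 5 bits uses an alignment mask of
 * roundup_pow_of_two(5) - 1 = 7, so the returned region starts at a
 * bit index that is a multiple of 8.
 */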

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region from
 * which the requested memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid)
{
	struct gen_pool **ptr, *pool;

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = gen_pool_create(min_alloc_order, nid);
	if (pool) {
		*ptr = pool;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
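
/*
 * Example use from a probe routine (a sketch: the driver is
 * hypothetical; no explicit gen_pool_destroy() is needed, devres
 * destroys the pool when the device is detached):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, 5, -1);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *	}
 */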

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev)
{
	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
					NULL);

	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;
	pdev = of_find_device_by_node(np_pool);
	of_node_put(np_pool);
	if (!pdev)
		return NULL;
	return gen_pool_get(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
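
/*
 * Example (a sketch: the node and property names are hypothetical).
 * Given a device tree fragment such as:
 *
 *	sram: sram@40000000 { ... };
 *
 *	foo@50000000 {
 *		sram-pool = <&sram>;
 *	};
 *
 * the driver for "foo" can look up its pool with:
 *
 *	pool = of_get_named_gen_pool(dev->of_node, "sram-pool", 0);
 */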
#endif /* CONFIG_OF */