/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

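/*
 * Worked example (illustrative values only): for an area with
 * base_pfn == 0x12345 and order_per_bit == 0, a request with
 * align_order == 8 gives mask == 0xff and offset == 0x45.  Feeding both
 * into bitmap_find_next_zero_area_off() in cma_alloc() ensures the
 * resulting PFN is aligned to 1 << align_order pages.
 */
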
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

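/*
 * Example use (illustrative only; the caller below is hypothetical and not
 * part of this file).  A boot-time hook that has already reserved a
 * suitably aligned block via memblock can hand it over to CMA:
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		// base/size must already be memblock-reserved and aligned
 *		// as checked above, otherwise -EINVAL is returned
 *		return cma_init_reserved_mem(base, size, 0, "my-area", &my_cma);
 *	}
 *
 * The reserved-memory device-tree path (rmem_cma_setup()) is one in-tree
 * caller of this function.
 */
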
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: Hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pageblocks by the page allocator's buddy
	 * algorithm. In that case we could not get contiguous memory, which
	 * is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
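	/*
	 * Illustrative figures only (both orders are configuration dependent):
	 * with 4 KiB pages, pageblock_order == 9 and the default MAX_ORDER ==
	 * 11, the minimum enforced here is PAGE_SIZE << 10, so base, size and
	 * limit are all rounded to 4 MiB boundaries below.
	 */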
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

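/*
 * Example use (illustrative only; the caller below is hypothetical and not
 * part of this file).  Arch or platform setup code typically calls this
 * while memblock is still up, e.g. to carve out 64 MiB anywhere in memory:
 *
 *	static struct cma *camera_cma;
 *
 *	void __init platform_reserve_camera_cma(void)
 *	{
 *		cma_declare_contiguous(0, 64 * SZ_1M, 0, 0, 0,
 *				       false, "camera", &camera_cma);
 *	}
 *
 * With @fixed == false the area may land anywhere below @limit (here
 * unlimited); pass a non-zero @base and @fixed == true to demand an exact
 * placement.
 */
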
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
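
/*
 * Example of the resulting line (counts are illustrative): two free runs
 * of 5 and 8 pages starting at bit offsets 10 and 32 are printed as
 *
 *	cma: number of available pages: 5@10+8@32=> 13 free of 64 total pages
 */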
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask:  GFP mask to use during compaction
 *
 * This function allocates contiguous pages from the specified contiguous
 * memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

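/*
 * Example pairing of cma_alloc() and cma_release() (illustrative only;
 * "my_cma" is a hypothetical area set up elsewhere, e.g. by one of the
 * reservation helpers above):
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(my_cma, 1 << 8, 8, GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	cma_release(my_cma, pages, 1 << 8);
 *
 * @count is in pages and @align is an order, so the call above asks for
 * 256 pages aligned to a 256-page boundary (1 MiB with 4 KiB pages).
 */
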
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
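
/*
 * Example iterator for cma_for_each_area() (illustrative only; both
 * functions below are hypothetical):
 *
 *	static int cma_print_one(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	static void cma_print_all(void)
 *	{
 *		cma_for_each_area(cma_print_one, NULL);
 *	}
 *
 * A non-zero return value from the callback stops the iteration and is
 * propagated to the caller.
 */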