/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
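
/*
 * Worked example (illustrative, not part of the original source): with
 * 4 KiB pages and 8-byte pointers, as on x86-64,
 * NUM_PAGES_TO_ALLOC = PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512,
 * so every batched alloc/free operation handles at most 512 pages (2 MiB)
 * and the staging array of page pointers fits in exactly one page.
 */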

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: sysfs kobject under which the pool tunables are exposed.
 * @mm_shrink: Shrinker used by the mm to ask the pools to free pages; it is
 * only run when there are some pages to free.
 * @options: Limits for the pools (alloc_size, max_size, small); see
 * struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
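
/*
 * Worked example (illustrative): the sysfs files operate in KiB. With
 * 4 KiB pages, PAGE_SIZE >> 10 == 4, so writing "8192" to pool_max_size
 * stores 8192 / 4 == 2048 pages in options.max_size; ttm_pool_show()
 * below performs the inverse conversion when reading the file back.
 */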

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif
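
/*
 * Note (added for clarity): on X86 the real set_pages_array_{wb,wc,uc}()
 * implementations are provided by the arch code; the stubs above only
 * cover the non-x86 case, where AGP (un)mapping is the best approximation.
 */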

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
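
/*
 * Illustrative mapping, derived from ttm_get_pool() above and the union
 * layout in struct ttm_pool_manager:
 *
 *	tt_wc                 -> pools[0] (wc_pool)
 *	tt_uncached           -> pools[1] (uc_pool)
 *	tt_wc + DMA32         -> pools[2] (wc_pool_dma32)
 *	tt_uncached + DMA32   -> pools[3] (uc_pool_dma32)
 *	tt_cached             -> NULL (no pool)
 */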

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * the code that follows runs under the spinlock
			 * while we are outside it here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/**
 * Callback for the mm to request that the pool reduce the number of pages
 * held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
 * this can deadlock when called with a sc->gfp_mask that is not equal to
 * GFP_KERNEL.
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
		freed += nr_free - shrink_pages;
	}
	mutex_unlock(&lock);
	return freed;
}
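
/*
 * Illustrative walk-through of the scan above (numbers are hypothetical):
 * with sc->nr_to_scan = 128 and the first visited pool holding 50 pages,
 * ttm_page_pool_free() returns 78 pages still to free, "freed" grows by
 * 128 - 78 = 50, and the remaining 78 are requested from the next pool.
 */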


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change the caching state. If there are any
 * pages that have already changed their caching state, put them back to the
 * pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem pages should never be dma32, so we
		 * should be fine in such a case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new
	 * pages are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool, we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate,	alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &pool->list, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
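
/*
 * Illustrative example (using the defaults set up in ttm_page_alloc_init()):
 * options.small = SMALL_ALLOCATION (16) and options.alloc_size =
 * NUM_PAGES_TO_ALLOC, so a request for 4 pages against a pool holding 2
 * refills the pool with a full alloc_size batch, while a request for 100
 * pages skips the refill and is satisfied partly from the pool and partly
 * by direct allocation.
 */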

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk from whichever end of the list is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
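
/*
 * Worked example of the bidirectional search above (hypothetical numbers):
 * with pool->npages = 100 and count = 10, the cut point is found in 10
 * steps from the head instead of 91 steps from the tail; for count = 90
 * the list is walked 11 steps from the tail instead.
 */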

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce the number of calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
}
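
/*
 * Illustrative trim (hypothetical numbers): with options.max_size = 2048
 * and pool->npages = 2100 after the splice above, the overshoot is 52
 * pages, which is rounded up to NUM_PAGES_TO_ALLOC so that the expensive
 * set_pages_array_wb() call in ttm_pages_put() runs on a full batch.
 */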

/*
 * On success, the pages list will hold count correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine the zero flag with the pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			if (PageHighMem(p))
				clear_highpage(p);
			else
				clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * to the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
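
/*
 * Usage sketch (assumed driver code, not part of this file): a TTM driver
 * with no special DMA requirements would typically wire these helpers
 * directly into its ttm_tt backend callbacks, e.g.:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 */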

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);