/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <asm/atomic.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

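/* Number of struct page pointers that fit in one page. This bounds the batch
 * size used when filling or draining a pool, so the temporary pointer arrays
 * never exceed a single page. */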
#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass to alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name shown in debugfs output.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for pool code so it doesn't need locking.
 *
 * @kobj: kobject used to expose the pool options in sysfs.
 * @mm_shrink: Shrinker registered with the mm subsystem so the pools can be
 * drained when the system is under memory pressure.
 * @options: Tunable limits for the pools (see struct ttm_pool_opts).
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

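	/* Anonymous union: generic pool code iterates over pools[] while
	 * initialization refers to each pool by name; both views alias the
	 * same four pools. */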
	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		} ;
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			printk(KERN_ERR TTM_PFX
			       "Setting allocation size to %lu "
			       "is not allowed. Recommended size is "
			       "%lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			printk(KERN_WARNING TTM_PFX
			       "Setting allocation size to "
			       "larger than %lu is not recommended.\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

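	/* Index into _manager->pools[]: bit 0 selects wc (0) or uc (1),
	 * bit 1 selects the DMA32 variant, matching the order of the union
	 * members in struct ttm_pool_manager. */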
	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
				npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: pool to free the pages from
 * @nr_free: number of pages to free; pass FREE_ALL_PAGES to free every page
 * in the pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		printk(KERN_ERR TTM_PFX
		       "Failed to allocate memory for pool free operation.\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for mm to request that the pools reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
			      struct shrink_control *sc)
{
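	/* Rotate the starting pool between invocations so repeated shrink
	 * calls don't always drain the same pool first. */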
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to uc!\n",
			       cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to wc!\n",
			       cpages);
		break;
	default:
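		/* tt_cached pages need no caching attribute change */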
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change caching state. They are removed from
 * the pages list before being freed; pages whose caching state was already
 * changed remain on the list so they can still be put into the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		printk(KERN_ERR TTM_PFX
		       "Unable to allocate table for new pages.");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate,	alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printk(KERN_ERR TTM_PFX
			       "Failed to fill pool (%p).", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &pool->list, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct list_head *pages, int ttm_flags,
		enum ttm_caching_state cstate, unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
int ttm_get_pages(struct list_head *pages, int flags,
		  enum ttm_caching_state cstate, unsigned count,
		  dma_addr_t *dma_address)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < count; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {

				printk(KERN_ERR TTM_PFX
				       "Unable to allocate page.");
				return -ENOMEM;
			}

			list_add(&p->lru, pages);
		}
		return 0;
	}


	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, pages, lru) {
			clear_page(page_address(p));
		}
	}

	/* If pool didn't have enough pages allocate new ones. */
	if (count > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
		if (r) {
			/* If there are any pages in the list put them back
			 * into the pool. */
			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.");
			ttm_put_pages(pages, 0, flags, cstate, NULL);
			return r;
		}
	}


	return 0;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p, *tmp;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */

		list_for_each_entry_safe(p, tmp, pages, lru) {
			__free_page(p);
		}
		/* Make the pages list empty */
		INIT_LIST_HEAD(pages);
		return;
	}
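	/* A page_count of 0 means the caller did not track how many pages it
	 * is returning; walk the list to count them. */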
	if (page_count == 0) {
		list_for_each_entry_safe(p, tmp, pages, lru) {
			++page_count;
		}
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	list_splice_init(pages, &pool->list);
	pool->npages += page_count;
	/* Check that we don't go over the pool limit */
	page_count = 0;
	if (pool->npages > _manager->options.max_size) {
		page_count = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (page_count < NUM_PAGES_TO_ALLOC)
			page_count = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (page_count)
		ttm_page_pool_free(pool, page_count);
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);