/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
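/*
 * This file implements pre-registration of chunks of userspace memory
 * for IOMMU (TCE) mappings: the pages are pinned and their host physical
 * addresses cached so TCE tables can be populated later without faulting.
 * The main consumer is the VFIO SPAPR TCE driver.
 */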

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>

static DEFINE_MUTEX(mem_list_mutex);

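/*
 * One mm_iommu_table_group_mem_t describes a single pre-registered chunk
 * of userspace memory. Descriptors live on the per-mm
 * context.iommu_group_mem_list; the list and @used are serialised by
 * mem_list_mutex, while lookups walk the list under RCU (or locklessly
 * in real mode).
 */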
struct mm_iommu_table_group_mem_t {
	struct list_head next;	/* linked in mm->context.iommu_group_mem_list */
	struct rcu_head rcu;	/* defers freeing past RCU readers */
	unsigned long used;	/* reference count, guarded by mem_list_mutex */
	atomic64_t mapped;	/* 1 + active mappings, see mm_iommu_mapped_inc() */
	unsigned int pageshift;	/* min of backing page shifts, caps IOMMU page size */
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

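/*
 * Account the pinned pages against the owner's RLIMIT_MEMLOCK. As a
 * worked example, assuming 64K pages: pre-registering a 1GB chunk adds
 * 16384 pages to mm->locked_vm; if that exceeds the rlimit and the
 * caller lacks CAP_IPC_LOCK, registration fails with -ENOMEM.
 */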
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}

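/*
 * Migrate a page out of the CMA zone before pinning it: a pinned page
 * cannot be moved, so leaving it in CMA would block future contiguous
 * allocations from that region.
 */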
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	/*
	 * Return 0 even on migration failure: the gup reference was dropped
	 * above, so the caller must call get_user_pages_fast() again to
	 * re-pin the page regardless of whether the migration succeeded.
	 */
	return 0;
}

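/*
 * Pin @entries pages starting at userspace address @ua and cache their
 * physical addresses in a new (or existing) descriptor. Returns 0 and
 * sets *@pmem on success, -EINVAL if the range partially overlaps an
 * already registered chunk, -ENOMEM on allocation or rlimit failure,
 * and -EFAULT if a page cannot be pinned. Registering an identical
 * range again only bumps the existing descriptor's @used count.
 */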
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	unsigned int pageshift;
	unsigned long flags;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}

	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	/*
	 * As a starting point for the maximum page size calculation, use
	 * the natural alignment of @ua and @entries; this allows IOMMU
	 * pages smaller than huge pages but still bigger than PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		pageshift = PAGE_SHIFT;
		if (PageCompound(page)) {
			pte_t *pte;
			struct page *head = compound_head(page);
			/*
			 * compound_order() returns a page order; convert it
			 * to a shift before comparing with the PTE shift.
			 */
			unsigned int compshift = compound_order(head) +
					PAGE_SHIFT;
			unsigned int pteshift = PAGE_SHIFT;

			local_irq_save(flags); /* disables as well */
			pte = find_linux_pte(mm->pgd, ua, NULL, &pteshift);
			local_irq_restore(flags);

			/* Double check it is still the same pinned page */
			if (pte && pte_page(*pte) == head &&
					pteshift == compshift)
				pageshift = max_t(unsigned int, pteshift,
						PAGE_SHIFT);
		}
		mem->pageshift = min(mem->pageshift, pageshift);
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

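/*
 * Teardown is RCU-deferred: mm_iommu_release() unlinks the descriptor
 * and schedules mm_iommu_free() via call_rcu(), so lockless readers
 * such as mm_iommu_lookup_rm() never see freed memory. The pages are
 * only unpinned once the grace period has elapsed.
 */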
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

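/*
 * Three lookup flavours follow: mm_iommu_lookup() finds the chunk
 * containing [ua, ua + size) for virtual mode callers,
 * mm_iommu_lookup_rm() does the same with the lockless list walker so
 * it is usable in real mode (MMU off), and mm_iommu_find() matches a
 * chunk exactly by @ua and @entries.
 */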
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

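/*
 * Translation is plain array indexing: the entry is @ua's page offset
 * within the chunk, and the in-page offset bits of @ua are merged back
 * into the cached physical address. A hypothetical example, assuming
 * 64K pages (PAGE_SHIFT == 16):
 *
 *	mem->ua = 0x7fff00000000, ua = 0x7fff00030123
 *	entry   = (ua - mem->ua) >> 16 = 3
 *	*hpa    = mem->hpas[3] | 0x0123
 */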
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

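/*
 * @mapped is a biased reference counter: it starts at 1 and counts
 * active hardware mappings on top of that bias. mm_iommu_put() removes
 * the bias with cmpxchg(1, 0), after which mm_iommu_mapped_inc() fails
 * and no new mappings can be created.
 */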
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}
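
/*
 * A minimal usage sketch (illustrative only, error handling trimmed;
 * program_tce() is a hypothetical stand-in for a real TCE table update,
 * roughly what the VFIO SPAPR TCE driver does):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_get(current->mm, ua, entries, &mem))
 *		return -EFAULT;
 *	if (!mm_iommu_ua_to_hpa(mem, ua, IOMMU_PAGE_SHIFT_4K, &hpa) &&
 *	    !mm_iommu_mapped_inc(mem))
 *		program_tce(hpa);	// hypothetical helper
 *	...
 *	mm_iommu_mapped_dec(mem);
 *	mm_iommu_put(current->mm, mem);
 */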