// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * userswap core file, including the swap-in and swap-out core functions
 */

#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/userswap.h>
#include <linux/userfaultfd_k.h>
#include <linux/security.h>

#include "internal.h"

DEFINE_STATIC_KEY_FALSE(userswap_enabled);

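/*
 * Check whether 'vma' is a plain private anonymous mapping that userswap can
 * handle (no file backing, not shared/locked/stack/IO/pfnmap/hugetlb).
 */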
static bool vma_uswap_compatible(struct vm_area_struct *vma)
{
	if (!vma || !vma_is_anonymous(vma) || vma->vm_file ||
	    vma->vm_flags & (VM_SHARED | VM_LOCKED | VM_STACK | VM_IO |
	    VM_PFNMAP | VM_HUGETLB))
		return false;
	return true;
}

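/*
 * Walk the page tables down to the PUD level for 'addr'; returns NULL if any
 * level is none or bad.
 */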
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

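/* Return true if 'addr' is mapped by a huge PUD, a huge PMD or a THP. */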
static bool is_thp_or_huge(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return false;
	else if (pud_huge(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (!pmd)
		return false;
	else if (pmd_huge(*pmd) || pmd_trans_huge(*pmd))
		return true;

	return false;
}

/*
 * Check if the pages between 'addr ~ addr+len' can be user swapped. If so,
 * take a reference on each page and return the pages through the output
 * parameter 'ppages'.
 */
static unsigned long pages_can_be_swapped(struct mm_struct *mm,
					  unsigned long addr,
					  unsigned long len,
					  struct page ***ppages)
{
	struct vm_area_struct *vma;
	struct page *page = NULL;
	struct page **pages = NULL;
	unsigned long addr_end = addr + len;
	unsigned long ret;
	unsigned long i, page_num = 0;
	*ppages = NULL;

	pages = kmalloc(sizeof(struct page *) * (len / PAGE_SIZE), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (addr < addr_end) {
		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_USWAP) ||
		    !vma_uswap_compatible(vma)) {
			ret = -EINVAL;
			goto out_err;
		}

		if (!(vma->vm_flags & VM_UFFD_MISSING)) {
			ret = -EAGAIN;
			goto out_err;
		}
get_again:
		/*
		 * follow_page will inc page ref, dec the ref after we remap
		 * the page.
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (IS_ERR_OR_NULL(page)) {
			ret = -ENODEV;
			goto out_err;
		}

		pages[page_num++] = page;
		if (!PageAnon(page) || !PageSwapBacked(page) ||
		    PageHuge(page) || PageSwapCache(page)) {
			ret = -EINVAL;
			goto out_err;
		}

		if (PageTransCompound(page)) {
			if (trylock_page(page)) {
				if (!split_huge_page(page)) {
					unlock_page(page);
					put_page(page);
					page_num--;
					goto get_again;
				} else
					unlock_page(page);
			}
			ret = -EINVAL;
			goto out_err;
		}

		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) > 1 ||
		    page_mapcount(page) + 1 != page_count(page)) {
			ret = -EBUSY;
			goto out_err;
		}
		addr += PAGE_SIZE;
	}

	*ppages = pages;
	return 0;

out_err:
	for (i = 0; i < page_num; i++)
		put_page(pages[i]);
	kfree(pages);
	return ret;
}

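/*
 * Unmap the anonymous page mapped at 'addr' in 'vma'. The cleared PTE is
 * returned through 'old_pte' when it is non-NULL; when 'set_to_swp' is true
 * the PTE is replaced with a SWP_USERSWAP_ENTRY swap entry carrying the
 * page's PFN.
 */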
static int uswap_unmap_anon_page(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr, struct page *page,
				 pmd_t *pmd, pte_t *old_pte, bool set_to_swp)
{
	struct mmu_notifier_range range;
	spinlock_t *ptl;
	pte_t *pte, _old_pte;
	int ret = 0;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma,
				vma->vm_mm, addr, addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		ret = -EINVAL;
		goto out_release_unlock;
	}
	flush_cache_page(vma, addr, pte_pfn(*pte));
	_old_pte = ptep_clear_flush(vma, addr, pte);
	if (old_pte)
		*old_pte = _old_pte;
	if (set_to_swp)
		set_pte_at(mm, addr, pte, swp_entry_to_pte(swp_entry(
			   SWP_USERSWAP_ENTRY, page_to_pfn(page))));

	dec_mm_counter(mm, MM_ANONPAGES);
	reliable_page_counter(page, mm, -1);
	page_remove_rmap(page, false);
	page->mapping = NULL;

out_release_unlock:
	pte_unmap_unlock(pte, ptl);
	mmu_notifier_invalidate_range_end(&range);
	return ret;
}

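/*
 * Re-establish the original mapping of an anonymous page: install 'old_pte'
 * at 'addr' again and add the page back to the anon rmap. Used on the
 * recovery paths.
 */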
static void uswap_map_anon_page(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long addr,
				struct page *page,
				pmd_t *pmd,
				pte_t old_pte)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_cache_page(vma, addr, pte_pfn(*pte));
	set_pte_at(mm, addr, pte, old_pte);
	inc_mm_counter(mm, MM_ANONPAGES);
	reliable_page_counter(page, mm, 1);
	page_add_new_anon_rmap(page, vma, addr, false);
	pte_unmap_unlock(pte, ptl);
}

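/*
 * Insert an anonymous page at 'addr' in 'vma'; fails with -EBUSY if a PTE is
 * already present there.
 */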
static unsigned long vm_insert_anon_page(struct vm_area_struct *vma,
					 unsigned long addr, struct page *page)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;
	pte_t *pte, dst_pte;
	spinlock_t *ptl;

	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	if (!pte_none(*pte)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	inc_mm_counter(mm, MM_ANONPAGES);
	reliable_page_counter(page, mm, 1);
	page_add_new_anon_rmap(page, vma, addr, false);
	dst_pte = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		dst_pte = pte_mkwrite(pte_mkdirty(dst_pte));
	set_pte_at(mm, addr, pte, dst_pte);

out_unlock:
	pte_unmap_unlock(pte, ptl);
	return ret;
}

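/*
 * Roll back a partially completed swap-out: unmap the pages already remapped
 * into the new address range and restore their original PTEs in the old
 * range.
 */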
static void uswapout_recover(struct mm_struct *mm,
			     unsigned long old_addr_start, unsigned long len,
			     struct page **pages, unsigned long new_addr_start,
			     pte_t *ptes)
{
	unsigned long unmap_old_addr = old_addr_start;
	unsigned long unmap_new_addr = new_addr_start;
	struct page *page;
	pmd_t *old_pmd, *new_pmd;
	pte_t pte;
	unsigned long i;

	for (i = 0; i < len; i++) {
		page = pages[i];
		pte = ptes[i];
		new_pmd = mm_find_pmd(mm, new_addr_start);
		old_pmd = mm_find_pmd(mm, unmap_old_addr);

		uswap_unmap_anon_page(mm, find_vma(mm, unmap_new_addr),
				      unmap_new_addr, page, new_pmd, NULL,
				      false);
		uswap_map_anon_page(mm, find_vma(mm, unmap_old_addr),
				    unmap_old_addr, page, old_pmd, pte);
		unmap_old_addr += PAGE_SIZE;
		unmap_new_addr += PAGE_SIZE;
	}
	if (pte_val(ptes[len]) != 0) {
		page = pages[len];
		pte = ptes[len];
		old_pmd = mm_find_pmd(mm, unmap_old_addr);

		uswap_map_anon_page(mm, find_vma(mm, unmap_old_addr),
				    unmap_old_addr, page, old_pmd, pte);
		get_page(page);
	}
}

/* unmap the pages between 'addr ~ addr+len' and remap them to a new address */
static unsigned long do_user_swap(struct mm_struct *mm,
				  unsigned long old_addr_start,
				  unsigned long len, struct page **pages,
				  unsigned long new_addr_start)
{
	struct vm_area_struct *old_vma, *new_vma;
	unsigned long old_addr = old_addr_start;
	unsigned long new_addr = new_addr_start;
	struct page *page;
	pmd_t *pmd;
	pte_t old_pte, *ptes;
	bool pages_dirty = false;
	unsigned long i = 0, j;
	int ret;

	ptes = kmalloc(sizeof(pte_t) * (len / PAGE_SIZE), GFP_KERNEL);
	if (!ptes)
		return -ENOMEM;
	memset(ptes, 0, sizeof(pte_t) * (len / PAGE_SIZE));
	lru_add_drain();
	for (j = 0; j < len; j += PAGE_SIZE) {
		page = pages[i];
		ret = -EINVAL;
		if (!page)
			goto out_recover;
		if (is_thp_or_huge(mm, new_addr))
			goto out_recover;
		old_vma = find_vma(mm, old_addr);
		if (!old_vma || old_addr < old_vma->vm_start)
			goto out_recover;
		new_vma = find_vma(mm, new_addr);
		if (!new_vma || new_addr < new_vma->vm_start)
			goto out_recover;
		if (!vma_uswap_compatible(new_vma))
			goto out_recover;

		ret = -EACCES;
		if (!(old_vma->vm_flags & VM_WRITE) &&
		    (new_vma->vm_flags & VM_WRITE))
			goto out_recover;

		ret = -ENXIO;
		pmd = mm_find_pmd(mm, old_addr);
		if (!pmd)
			goto out_recover;
		ret = uswap_unmap_anon_page(mm, old_vma, old_addr, page, pmd,
					    &old_pte, true);
		if (ret)
			goto out_recover;
		ptes[i] = old_pte;
		if (pte_dirty(old_pte) || PageDirty(page))
			pages_dirty = true;
		put_page(page);

		ret = vm_insert_anon_page(new_vma, new_addr, page);
		if (ret)
			goto out_recover;
		get_page(page);

		old_addr += PAGE_SIZE;
		new_addr += PAGE_SIZE;
		i++;
	}

	if (pages_dirty)
		new_addr_start = new_addr_start | USWAP_PAGES_DIRTY;
	kfree(ptes);
	return new_addr_start;

out_recover:
	uswapout_recover(mm, old_addr_start, i, pages, new_addr_start, ptes);
	kfree(ptes);
	return ret;
}


/*
 * When flags is MREMAP_USWAP_SET_PTE, uswap_mremap() is called from the
 * mremap() syscall.
 * Unmap the pages between 'old_addr ~ old_addr+old_len' and remap them to
 * 'new_addr ~ new_addr+new_len'. Set the PTEs of the old range to
 * SWP_USERSWAP_ENTRY swap entries.
 */
unsigned long uswap_mremap(unsigned long old_addr, unsigned long old_len,
			   unsigned long new_addr, unsigned long new_len)
{
	struct page **pages = NULL;
	struct mm_struct *mm = current->mm;
	unsigned long len = old_len;
	unsigned long ret = -EINVAL;
	unsigned long i;

	if (!len || old_len != new_len || offset_in_page(old_addr) ||
	    offset_in_page(new_addr) || (len % PAGE_SIZE))
		return ret;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len ||
	    old_addr > TASK_SIZE - old_len)
		return ret;

	/* Ensure the old/new locations do not overlap */
	if (old_addr + old_len > new_addr && new_addr + new_len > old_addr)
		return ret;

	lru_add_drain_all();
	down_read(&mm->mmap_lock);
	ret = pages_can_be_swapped(mm, old_addr, len, &pages);
	if (ret) {
		up_read(&mm->mmap_lock);
		return ret;
	}

	ret = do_user_swap(mm, old_addr, len, pages, new_addr);
	up_read(&mm->mmap_lock);
	/* follow_page() above increased the reference count */
	for (i = 0; i < len / PAGE_SIZE; i++)
		if (pages[i])
			put_page(pages[i]);
	kfree(pages);
	return ret;
}

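/*
 * Userswap swap-in without copying: unmap the page at 'src_addr' and map the
 * same page at 'dst_addr' in 'dst_vma'.
 */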
int mfill_atomic_pte_nocopy(struct mm_struct *mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr)
{
	struct vm_area_struct *src_vma;
	pte_t dst_pte, *pte, src_pte;
	pmd_t *src_pmd;
	spinlock_t *ptl;
	int ret = 0;
	struct page *page;

	src_vma = find_vma(mm, src_addr);
	if (!src_vma || src_addr < src_vma->vm_start)
		return -EINVAL;

	if (!vma_uswap_compatible(src_vma))
		return -EINVAL;
	page = follow_page(src_vma, src_addr, FOLL_GET | FOLL_MIGRATION |
			   FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return -ENODEV;

	src_pmd = mm_find_pmd(mm, src_addr);
	if (!src_pmd) {
		ret = -ENXIO;
		goto out_put_page;
	}
	ret = uswap_unmap_anon_page(mm, src_vma, src_addr, page, src_pmd,
				    &src_pte, false);
	if (ret)
		goto out_put_page;
	if (dst_vma->vm_flags & VM_USWAP)
		ClearPageDirty(page);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		dst_pte = pte_mkwrite(pte_mkdirty(dst_pte));
	if (dst_vma->vm_flags & VM_USWAP)
		dst_pte = pte_mkclean(dst_pte);

	pte = pte_offset_map_lock(mm, dst_pmd, dst_addr, &ptl);

	/*
	 * Userspace may swap in a large area in which some pages were never
	 * swapped out, or may race with a concurrent swap-in, so the PTE may
	 * already be present; skip such pages (pte_present).
	 * Nothing other than the first page fault (pte_none) and a page
	 * swapped out by userswap (SWP_USERSWAP_ENTRY) should be handled here.
	 */
	if (pte_present(*pte) || (!pte_none(*pte) &&
	    swp_type(pte_to_swp_entry(*pte)) != SWP_USERSWAP_ENTRY)) {
		pte_unmap_unlock(pte, ptl);
		uswap_map_anon_page(mm, src_vma, src_addr, page, src_pmd,
				    src_pte);
		ret = -EEXIST;
		goto out_put_page;
	}

	inc_mm_counter(mm, MM_ANONPAGES);
	reliable_page_counter(page, mm, 1);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	set_pte_at(mm, dst_addr, pte, dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, pte);
	pte_unmap_unlock(pte, ptl);

out_put_page:
	put_page(page);
	return ret;
}

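/*
 * Handle UFFDIO_REGISTER_MODE_USWAP during userfaultfd registration: clear
 * the bit from 'uffdio_register->mode' and set '*uswap_mode'. Returns false
 * only when no other register mode bits remain.
 */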
bool uswap_register(struct uffdio_register *uffdio_register, bool *uswap_mode)
{
	if (!static_branch_unlikely(&userswap_enabled))
		return true;
	if (!(uffdio_register->mode & UFFDIO_REGISTER_MODE_USWAP))
		return true;
	uffdio_register->mode &= ~UFFDIO_REGISTER_MODE_USWAP;
	if (!uffdio_register->mode)
		return false;
	*uswap_mode = true;
	return true;
}

/*
 * Register the whole of any vma overlapping with the address range, to avoid
 * splitting the vma and thereby reduce fragmentation.
 */
bool uswap_adjust_uffd_range(struct uffdio_register *uffdio_register,
			     unsigned long *vm_flags, struct mm_struct *mm)
{
	struct vm_area_struct *vma, *cur;
	unsigned long end;
	bool ret = false;

	if (!static_branch_unlikely(&userswap_enabled))
		return true;
	end = uffdio_register->range.start + uffdio_register->range.len - 1;

	mmap_read_lock(mm);
	vma = find_vma(mm, uffdio_register->range.start);
	if (!vma || vma->vm_start >= end)
		goto out_unlock;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next)
		if (!vma_uswap_compatible(cur))
			goto out_unlock;
	uffdio_register->range.start = vma->vm_start;
	vma = find_vma(mm, end);
	if (vma && end >= vma->vm_start)
		uffdio_register->range.len = vma->vm_end - uffdio_register->range.start;

	*vm_flags |= VM_USWAP;

	ret = true;
out_unlock:
	mmap_read_unlock(mm);
	return ret;
}

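/*
 * Handle a fault on a SWP_USERSWAP_ENTRY by forwarding it to userspace via
 * handle_userfault(). Returns false when the fault has been resolved here
 * (with '*ret' set), true when normal fault handling should continue.
 */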
bool do_uswap_page(swp_entry_t entry, struct vm_fault *vmf,
		   struct vm_area_struct *vma, vm_fault_t *ret)
{
	if (!static_branch_unlikely(&userswap_enabled))
		return true;

	if (swp_type(entry) != SWP_USERSWAP_ENTRY)
		return true;

	/* print error if we come across a nested fault */
	if (!strncmp(current->comm, "uswap", 5)) {
		pr_err("USWAP: fault %lx is triggered by %s\n", vmf->address,
		       current->comm);
		*ret = VM_FAULT_SIGBUS;
		return false;
	}

	if (!(vma->vm_flags & VM_UFFD_MISSING)) {
		pr_err("USWAP: addr %lx flags %lx is not a user swap page",
				vmf->address, vma->vm_flags);
		return true;
	}

	*ret = handle_userfault(vmf, VM_UFFD_MISSING | VM_USWAP);
	return false;
}

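/* "enable_userswap" kernel boot parameter: enable the userswap static key. */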
static int __init enable_userswap_setup(char *str)
{
	static_branch_enable(&userswap_enabled);
	return 1;
}
__setup("enable_userswap", enable_userswap_setup);