/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
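
/*
 * Illustrative note (not in the original source): on a typical x86-64
 * configuration with 4 KiB pages, pe_order() returns 0 for PE_SIZE_PTE,
 * 9 for PE_SIZE_PMD (2 MiB) and 18 for PE_SIZE_PUD (1 GiB).
 */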

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
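
/*
 * Illustrative note (not in the original source): with 4 KiB pages and a
 * 2 MiB PMD, PG_PMD_COLOUR is 0x1ff, PG_PMD_NR is 512 and PMD_ORDER is 9;
 * the exact values are architecture dependent.
 */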

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)
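
/*
 * Worked example (illustrative, not from the original source): with
 * DAX_SHIFT == 4, a PMD-sized entry for pfn 0x1000 is stored as the xarray
 * value (0x1000 << DAX_SHIFT) | DAX_PMD == 0x10002, and dax_to_pfn() below
 * recovers the pfn by shifting the value back down by DAX_SHIFT.
 */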

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
				!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);

	/*
	 * Entry lock waits are exclusive. Wake up the next waiter since
	 * we aren't sure we will acquire the entry lock and thus wake
	 * the next waiter up on unlock.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry)
		dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
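
/*
 * Usage sketch (illustrative, not part of the original file): callers such
 * as the memory-failure path are expected to pair these helpers roughly as
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *
 *	if (!cookie)
 *		return;			(entry could not be locked)
 *	... operate on the page while the entry is held locked ...
 *	dax_unlock_page(page, cookie);
 */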

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned long size_flag)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas);

	if (entry) {
		if (!xa_is_value(entry)) {
			xas_set_err(xas, EIO);
			goto out_unlock;
		}

		if (size_flag & DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_entry(xas, entry);
				goto fallback;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		void *old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &range,
				   &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	size = PAGE_SIZE << dax_entry_order(entry);

	dax_entry_mkclean(mapping, xas->xa_index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, xas->xa_index,
			size >> PAGE_SHIFT);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
936 937
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
R
939
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
R
941
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
942
	struct dax_device *dax_dev;
943 944 945
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;
R
	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
R
		}
968 969 970 971 972 973 974
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
R
976
	xas_unlock_irq(&xas);
977
	put_dax(dax_dev);
978 979
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
R
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate a block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
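
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * filesystem's ->read_iter() typically wraps dax_iomap_rw() roughly as
 *
 *	inode_lock_shared(inode);
 *	ret = dax_iomap_rw(iocb, to, &fs_iomap_ops);
 *	inode_unlock_shared(inode);
 *
 * where fs_iomap_ops stands in for the filesystem's own struct iomap_ops,
 * and the write side holds the inode lock exclusively instead.
 */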

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
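
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * filesystem fault handler is expected to call this roughly as
 *
 *	pfn_t pfn;
 *	vm_fault_t ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *					 &fs_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *
 * with fs_iomap_ops standing in for the filesystem's struct iomap_ops.
 */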

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (order == 0 && !dax_is_pte_entry(entry)) ||
	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
1699 1700
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
J
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1704 1705
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;
J
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
1710
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
J
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);