/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * so the stack from this point must follow GFP_NOFS semantics for all
 * operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
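
/*
 * Example (illustrative, not taken from a particular filesystem): a
 * caller that has just allocated "nr_blocks" new blocks for a DAX file
 * might zero them before exposing them, so stale media contents never
 * reach userspace.  "err" and "nr_blocks" are placeholder names:
 *
 *	err = dax_clear_blocks(inode, block,
 *			nr_blocks << inode->i_blkbits);
 *	if (err)
 *		return err;
 */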

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc = 0;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
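
/*
 * Example (illustrative): a filesystem's ->direct_IO method, loosely
 * modeled on ext2's, might dispatch to dax_do_io() for DAX inodes.
 * "my_get_block" is a placeholder for the filesystem's get_block_t:
 *
 *	static ssize_t my_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *			loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					my_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					my_get_block);
 *	}
 */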

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; may be NULL if the filesystem never returns unwritten
 *	extents
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
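
/*
 * Example (illustrative): wiring dax_fault() into a filesystem's
 * vm_operations_struct, loosely modeled on ext2.  "my_get_block" is a
 * placeholder, and NULL is passed for @complete_unwritten on the
 * assumption that the filesystem never returns unwritten extents:
 *
 *	static int my_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, my_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct my_dax_vm_ops = {
 *		.fault		= my_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */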

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
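
/*
 * For example, with 4K pages and 2M PMDs (as on x86_64), PG_PMD_COLOUR is
 * 511 (0x1ff): the low nine bits of a page offset, which select a page's
 * position within its PMD.
 */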

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	if (get_block(inode, block, &bh, write) != 0)
		return VM_FAULT_SIGBUS;
	bdev = bh.b_bdev;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		goto fallback;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(&bh, address, "pgoff unaligned");
		goto fallback;
	}

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address at which the fault occurred
 * @pmd: The PMD entry in the page table that needs to be filled in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
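
/*
 * Example (illustrative): a filesystem's ->pmd_fault handler is a thin
 * wrapper, symmetrical to the PTE case above.  "my_get_block" is a
 * placeholder, and NULL is passed for @complete_unwritten on the
 * assumption that the filesystem never returns unwritten extents:
 *
 *	static int my_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags, my_get_block,
 *					NULL);
 *	}
 */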
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 *
 * When a previously read-only page in a DAX mapping is first written to,
 * update the file's timestamps.  The core mm takes care of making the
 * PTE writable after this handler returns.
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
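
/*
 * Example (illustrative): hooking the fault handlers above into ->mmap,
 * loosely modeled on ext2.  "my_dax_vm_ops" is assumed to name the
 * vm_operations_struct sketched earlier.  VM_MIXEDMAP is required for
 * vm_insert_mixed():
 *
 *	static int my_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (!IS_DAX(file_inode(file)))
 *			return generic_file_mmap(file, vma);
 *
 *		file_accessed(file);
 *		vma->vm_ops = &my_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP;
 *		return 0;
 *	}
 */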

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range resides
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
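
/*
 * Example (illustrative): a truncate path, loosely modeled on ext2's
 * setsize helper, zeroes the partial tail page before shrinking i_size.
 * "my_get_block" is a placeholder:
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, my_get_block);
 *	else
 *		error = block_truncate_page(inode->i_mapping, newsize,
 *					my_get_block);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */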