/*
 *	linux/mm/filemap.h
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

#ifndef __FILEMAP_H
#define __FILEMAP_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/config.h>
#include <linux/uaccess.h>

size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr,
					const struct iovec *iov,
					size_t base,
					size_t bytes);

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the page
 * out to (offset+bytes) and return the number of bytes which were copied.
 *
 * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
 * to *NOT* zero any tail of the buffer that it failed to copy.  If it does,
 * and if the following non-atomic copy succeeds, then there is a small window
 * where the target page contains neither the data before the write, nor the
 * data after the write (it contains zero).  A read at this time will see
 * data that is inconsistent with any ordering of the read and the write.
 * (This has been detected in practice).
 */
static inline size_t
filemap_copy_from_user(struct page *page, unsigned long offset,
			const char __user *buf, unsigned bytes)
{
	char *kaddr;
	int left;

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (left != 0) {
		/* Do it the slow way */
		kaddr = kmap(page);
		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return bytes - left;
}
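
/*
 * Illustrative sketch (hypothetical helper, not an existing kernel
 * function): a caller in a buffered-write path, which already holds a
 * pagecache page and has clamped 'bytes' so that offset + bytes does not
 * exceed PAGE_SIZE, might use filemap_copy_from_user() like this and
 * treat a short return as a partial write to be retried.
 */
#if 0	/* example only, not compiled */
static size_t example_fill_page_from_user(struct page *page,
					   unsigned long offset,
					   const char __user *buf,
					   unsigned bytes)
{
	size_t copied;

	/* The fast path uses an atomic kmap; on a fault it falls back to
	 * the sleeping kmap()/__copy_from_user_nocache() path internally. */
	copied = filemap_copy_from_user(page, offset, buf, bytes);

	/* 'copied' may be less than 'bytes' if the user buffer faulted;
	 * the caller decides whether to commit the partial data or retry. */
	return copied;
}
#endif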

/*
 * This has the same side effects and return value as filemap_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
 * single-segment behaviour.
 */
static inline size_t
filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
			const struct iovec *iov, size_t base, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page, KM_USER0);
	copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
							 base, bytes);
	kunmap_atomic(kaddr, KM_USER0);
	if (copied != bytes) {
		kaddr = kmap(page);
		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
								 base, bytes);
		if (bytes - copied)
			memset(kaddr + offset + copied, 0, bytes - copied);
		kunmap(page);
	}
	return copied;
}

static inline void
filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t base = *basep;

	do {
		int copy = min(bytes, iov->iov_len - base);

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {
			iov++;
			base = 0;
		}
	} while (bytes);
	*iovp = iov;
	*basep = base;
}
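
/*
 * Illustrative sketch (hypothetical helper, not an existing kernel
 * function): how filemap_copy_from_user_iovec() and
 * filemap_set_next_iovec() are meant to cooperate in a multi-segment
 * write.  'cur_iov' and 'iov_base' track the caller's current position
 * within the iovec array and are advanced by the number of bytes
 * actually copied.
 */
#if 0	/* example only, not compiled */
static size_t example_fill_page_from_iovec(struct page *page,
					    unsigned long offset,
					    const struct iovec **cur_iov,
					    size_t *iov_base, size_t bytes)
{
	size_t copied;

	/* Copy up to 'bytes' from the current iovec position; on a fault
	 * the remainder of the destination range is zero-filled, matching
	 * the single-segment behaviour described above. */
	copied = filemap_copy_from_user_iovec(page, offset, *cur_iov,
					      *iov_base, bytes);

	/* Advance the iovec cursor past what was consumed so the next
	 * call resumes at the correct segment and intra-segment offset. */
	if (copied)
		filemap_set_next_iovec(cur_iov, iov_base, copied);

	return copied;
}
#endif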
#endif