gup.c
/*
 * Lockless get_user_pages_fast for powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
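/*
 * The walk below takes no mmap_sem and no page table locks.  Interrupts
 * are disabled for its duration, which on powerpc prevents the page
 * table pages from being freed under us; each pte is then re-checked
 * after a speculative reference has been taken on its page, and any
 * mismatch backs out to the regular, locked get_user_pages() path.
 */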
#undef DEBUG

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

#ifdef __HAVE_ARCH_PTE_SPECIAL

/*
 * The performance-critical leaf functions are made noinline, otherwise
 * gcc inlines everything into a single function, which results in too
 * much register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;
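	/*
	 * _PAGE_SPECIAL is in the mask but not in result, so any pte
	 * with _PAGE_SPECIAL set fails the comparison below: special
	 * mappings have no struct page to take a reference on.
	 */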

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		pte_t pte = *ptep;
		struct page *page;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
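		/*
		 * The reference was taken without the page table lock,
		 * so re-read the pte; if it changed, back out.
		 */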
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

#ifdef CONFIG_HUGETLB_PAGE
static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
				 unsigned long *addr, unsigned long end,
				 int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

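	/* Only cover the pages spanned by this single huge pte */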
	pte_end = (*addr + huge_page_size(hstate)) & huge_page_mask(hstate);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		mask |= _PAGE_RW;
	if ((pte_val(pte) & mask) != mask)
		return 0;
	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);
	page = head + ((*addr & ~huge_page_mask(hstate)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (*addr += PAGE_SIZE, *addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}
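	/*
	 * The references were taken without the page table lock; re-read
	 * the pte to make sure it did not change underneath us.
	 */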
	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
#endif /* CONFIG_HUGETLB_PAGE */

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
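		/* Snapshot the pmd so the check and the walk see one value */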
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;
#ifdef CONFIG_PPC64
	unsigned int shift;
	int psize;
#endif

	pr_debug("%s(%lx,%x,%s)\n", __func__,
		 start, nr_pages, write ? "write" : "read");

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		goto slow_irqon;

	pr_debug("  aligned: %lx .. %lx\n", start, end);

#ifdef CONFIG_HUGETLB_PAGE
	/* We bail out on slice boundary crossing when hugetlb is
	 * enabled so that we don't have to deal with two different
	 * page table formats
	 */
	if (addr < SLICE_LOW_TOP) {
		if (end > SLICE_LOW_TOP)
			goto slow_irqon;

		if (unlikely(GET_LOW_SLICE_INDEX(addr) !=
			     GET_LOW_SLICE_INDEX(end - 1)))
			goto slow_irqon;
	} else {
		if (unlikely(GET_HIGH_SLICE_INDEX(addr) !=
			     GET_HIGH_SLICE_INDEX(end - 1)))
			goto slow_irqon;
	}
#endif /* CONFIG_HUGETLB_PAGE */

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main user that batches up lots of get_user_pages, and even
	 * it is limited to 64 pages at a time, which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on powerpc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();

#ifdef CONFIG_PPC64
	/* These bits are related to the hugetlbfs implementation and only
	 * exist on 64-bit for now
	 */
	psize = get_slice_psize(mm, addr);
	shift = mmu_psize_defs[psize].shift;
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(mmu_huge_psizes[psize])) {
		pte_t *ptep;
		unsigned long a = addr;
		unsigned long sz = ((1UL) << shift);
		struct hstate *hstate = size_to_hstate(sz);

		BUG_ON(!hstate);
		/*
		 * XXX: could be optimized to avoid hstate
		 * lookup entirely (just use shift)
		 */

		do {
			VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
			ptep = huge_pte_offset(mm, a);
			pr_debug(" %016lx: huge ptep %p\n", a, ptep);
			if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
						   &nr))
				goto slow;
		} while (a != end);
	} else
#endif /* CONFIG_HUGETLB_PAGE */
	{
		pgdp = pgd_offset(mm, addr);
		do {
			pgd_t pgd = *pgdp;

#ifdef CONFIG_PPC64
			VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
#endif
			pr_debug("  %016lx: normal pgd %p\n", addr,
				 (void *)pgd_val(pgd));
			next = pgd_addr_end(addr, end);
			if (pgd_none(pgd))
				goto slow;
			if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
				goto slow;
		} while (pgdp++, addr = next, addr != end);
	}
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		pr_debug("  slow path ! nr = %d\n", nr);

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/*
		 * Have to be a bit careful with return values: if the fast
		 * path pinned some pages and get_user_pages() then failed,
		 * report the pages we did pin rather than the error.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
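/*
 * Usage sketch (illustrative, not part of this file): pinning a user
 * buffer and releasing it afterwards.  'buf' and 'n' are hypothetical
 * and error handling is trimmed.
 *
 *	struct page *pages[64];
 *	int got = get_user_pages_fast((unsigned long)buf, n, 1, pages);
 *
 *	if (got > 0) {
 *		... read from / DMA to the pinned pages ...
 *		while (got--)
 *			put_page(pages[got]);
 *	}
 */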

#endif /* __HAVE_ARCH_PTE_SPECIAL */