/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

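/*
 * Defined in hash_utils_64.c: inserts an HPTE, trying the primary and
 * then the secondary hash bucket, and evicting an existing entry if
 * both groups are full.
 */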
extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
				  unsigned long pa, unsigned long rflags,
				  unsigned long vflags, int psize, int ssize);

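/*
 * Build or update the hash-table entry backing a hugepage PTE.
 * Returns 0 when the access can simply be retried (HPTE installed or
 * updated, or the PTE was busy), 1 when the PTE permissions don't
 * match and a page fault should be taken, and -1 if the hypervisor
 * refused the insert.
 */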
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize)
{
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa, sz;
	long slot;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Search the Linux page table for a match with va */
	vpn = hpt_vpn(ea, vsid, ssize);

	/* At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */


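	/*
	 * Lock the PTE with a compare-and-swap: __cmpxchg_u64() only
	 * stores new_pte if the PTE still equals old_pte, so a
	 * concurrent update by another CPU restarts the loop.
	 */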
	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & _PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(access & ~old_pte))
			return 1;
		/* Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access */
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_RW)
			new_pte |= _PAGE_DIRTY;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));
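	/* Derive the HPTE R-field (pp and cache) bits from the Linux PTE */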
	rflags = htab_convert_pte_flags(new_pte);

	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU that supports hugepages lacks no-execute, so we
		 * don't need to worry about that case */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, shift, ssize);
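		/*
		 * _PAGE_F_SECOND records that the HPTE lives in the
		 * secondary hash bucket; _PAGE_F_GIX caches its 3-bit
		 * index within the 8-entry group, so the slot can be
		 * found without searching the hash table.
		 */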
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

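		/*
		 * Try to update the pp bits in place; -1 means the HPTE
		 * has vanished, so forget the stale slot info and fall
		 * through to insert a fresh entry below.
		 */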
		if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
					 mmu_psize, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		/* clear HPTE slot information in the new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));
		/*
		 * Always enable memory coherence.
		 */
		rflags |= HPTE_R_M;

		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
					     mmu_psize, ssize);

		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, mmu_psize, old_pte);
			return -1;
		}

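		/*
		 * Cache the returned slot (secondary-hash flag plus
		 * 3-bit group index) in the PTE as
		 * _PAGE_F_SECOND/_PAGE_F_GIX.
		 */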
		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here: we still hold _PAGE_BUSY,
	 * so no other updater can race with this store, which also
	 * releases the busy lock.
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);
	return 0;
}