/*
 * Copyright IBM Corporation, 2015
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/mm.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
18 19 20 21 22 23 24 25
/*
 * index from 0 - 15
 */
bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
{
	unsigned long g_idx;
	unsigned long ptev = pte_val(rpte.pte);

26
	g_idx = (ptev & H_PAGE_COMBO_VALID) >> H_PAGE_F_GIX_SHIFT;
27 28 29 30 31 32 33 34 35 36 37 38 39
	index = index >> 2;
	if (g_idx & (0x1 << index))
		return true;
	else
		return false;
}
/*
 * index from 0 - 15
 */
static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index)
{
	unsigned long g_idx;

40
	if (!(ptev & H_PAGE_COMBO))
41 42 43 44
		return ptev;
	index = index >> 2;
	g_idx = 0x1 << index;

45
	return ptev | (g_idx << H_PAGE_F_GIX_SHIFT);
46
}
47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68

/*
 * Insert/update the hash page table entry for the 4K subpage of a 64K
 * linux page containing @ea, demoting the PTE to combo (4K subpages)
 * mode.
 *
 * Returns 0 on success (or benign retry), 1 if the access permissions
 * don't match and a page fault should be taken, and -1 on hypervisor
 * HPTE-insert failure (old PTE is restored in that case).
 */
int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		   pte_t *ptep, unsigned long trap, unsigned long flags,
		   int ssize, int subpg_prot)
{
	real_pte_t rpte;
	unsigned long *hidxp;
	unsigned long hpte_group;
	unsigned int subpg_index;
	unsigned long rflags, pa, hidx;
	unsigned long old_pte, new_pte, subpg_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;

	/*
	 * atomically mark the linux large page PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access. Since this is 4K insert of 64K page size
		 * also add H_PAGE_COMBO
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/*
	 * Handle the subpage protection bits
	 */
	subpg_pte = new_pte & ~subpg_prot;
	rflags = htab_convert_pte_flags(subpg_pte);

	if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
	}

	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
	vpn  = hpt_vpn(ea, vsid, ssize);
	rpte = __real_pte(__pte(old_pte), ptep);
	/*
	 * None of the sub 4k pages are hashed yet
	 */
	if (!(old_pte & H_PAGE_HASHPTE))
		goto htab_insert_hpte;
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64k HW page, and invalidate the 64k HPTE if so.
	 */
	if (!(old_pte & H_PAGE_COMBO)) {
		flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
		/*
		 * Clear the old slot details from the old and new pte.
		 * On hash insert failure we use the old pte value and we
		 * don't want slot information there if we have an insert
		 * failure.
		 */
		old_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
		new_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
		goto htab_insert_hpte;
	}
	/*
	 * Check for sub page valid and update
	 */
	if (__rpte_sub_valid(rpte, subpg_index)) {
		int ret;

		hash = hpt_hash(vpn, shift, ssize);
		hidx = __rpte_to_hidx(rpte, subpg_index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
					   MMU_PAGE_4K, MMU_PAGE_4K,
					   ssize, flags);
		/*
		 * If we failed — typically because the HPTE wasn't really
		 * there — we try an insertion.
		 */
		if (ret == -1)
			goto htab_insert_hpte;

		*ptep = __pte(new_pte & ~H_PAGE_BUSY);
		return 0;
	}

htab_insert_hpte:
	/*
	 * Handle the H_PAGE_4K_PFN case
	 */
	if (old_pte & H_PAGE_4K_PFN) {
		/*
		 * All the sub 4k pages have the same
		 * physical address.
		 */
		pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
	} else {
		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		pa += (subpg_index << shift);
	}
	hash = hpt_hash(vpn, shift, ssize);
repeat:
	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

	/* Insert into the hash table, primary slot */
	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
				  MMU_PAGE_4K, MMU_PAGE_4K, ssize);
	/*
	 * Primary is full, try the secondary
	 */
	if (unlikely(slot == -1)) {
		hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
					  rflags, HPTE_V_SECONDARY,
					  MMU_PAGE_4K, MMU_PAGE_4K, ssize);
		if (slot == -1) {
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;
			ppc_md.hpte_remove(hpte_group);
			/*
			 * FIXME!! Should we retry the group from which we
			 * removed?
			 */
			goto repeat;
		}
	}
	/*
	 * Hypervisor failure. Restore old pte and return -1
	 * similar to __hash_page_*
	 */
	if (unlikely(slot == -2)) {
		*ptep = __pte(old_pte);
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
		return -1;
	}
	/*
	 * Insert slot number & secondary bit in PTE second half,
	 * clear H_PAGE_BUSY and set appropriate HPTE slot bit.
	 * Since we have H_PAGE_BUSY set on ptep, we can be sure
	 * nobody is updating hidx.
	 */
	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
	rpte.hidx &= ~(0xfUL << (subpg_index << 2));
	*hidxp = rpte.hidx  | (slot << (subpg_index << 2));
	new_pte = mark_subptegroup_valid(new_pte, subpg_index);
	new_pte |=  H_PAGE_HASHPTE;
	/*
	 * check __real_pte for details on matching smp_rmb()
	 */
	smp_wmb();
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}

/*
 * Insert/update the hash page table entry for a full 64K linux page
 * containing @ea.
 *
 * Returns 0 on success (or benign retry/refault), 1 if the access
 * permissions don't match and a page fault should be taken, and -1 on
 * hypervisor HPTE-insert failure (old PTE is restored in that case).
 */
int __hash_page_64K(unsigned long ea, unsigned long access,
		    unsigned long vsid, pte_t *ptep, unsigned long trap,
		    unsigned long flags, int ssize)
{
	unsigned long hpte_group;
	unsigned long rflags, pa;
	unsigned long old_pte, new_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift;

	/*
	 * atomically mark the linux large page PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Check if PTE has the cache-inhibit bit set.
		 * If so, bail out and refault as a 4k page
		 */
		if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
		    unlikely(pte_ci(pte)))
			return 0;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access.
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	rflags = htab_convert_pte_flags(new_pte);

	if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	vpn  = hpt_vpn(ea, vsid, ssize);
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/*
		 * There MIGHT be an HPTE for this pte
		 */
		hash = hpt_hash(vpn, shift, ssize);
		if (old_pte & H_PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;

		/* stale slot: drop the slot details so we insert afresh */
		if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
					 MMU_PAGE_64K, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & H_PAGE_HASHPTE))) {

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		hash = hpt_hash(vpn, shift, ssize);

repeat:
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					  MMU_PAGE_64K, MMU_PAGE_64K, ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
						  rflags, HPTE_V_SECONDARY,
						  MMU_PAGE_64K, MMU_PAGE_64K, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;
				ppc_md.hpte_remove(hpte_group);
				/*
				 * FIXME!! Should we retry the group from
				 * which we removed?
				 */
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   MMU_PAGE_64K, MMU_PAGE_64K, old_pte);
			return -1;
		}
		/* record slot number and secondary bit in the PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
		new_pte |= (slot << H_PAGE_F_GIX_SHIFT) &
			(H_PAGE_F_SECOND | H_PAGE_F_GIX);
	}
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}