/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/lmb.h>

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>
#include <asm/iseries/hv_call.h>

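/*
 * Hardware segment table entry: a 16-byte pair of doublewords, the
 * ESID word (with the valid and protection bits) followed by the
 * VSID word.
 */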
struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

#define NR_STAB_CACHE_ENTRIES 8
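/*
 * Per-CPU cache of the user segment table slots touched since the
 * last context switch, so switch_stab() can invalidate just those
 * entries instead of scanning the whole table.
 */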
static DEFINE_PER_CPU(long, stab_cache_ptr);
static DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);

/*
 * Create a segment table entry for the given esid/vsid pair and
 * return the index of the entry used.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;

	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
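				/* Order the VSID write before setting the valid ESID */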
				eieio();
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry, so pick one for castout using
	 * round-robin selection; search all entries in both groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment */
		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");    /* Order update */

	castout_ste->vsid_data = vsid_data;
	eieio();				/* Order update */
	castout_ste->esid_data = esid_data;

	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (is_kernel_addr(ea)) {
		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	} else {
		if ((ea >= TASK_SIZE_USER64) || (!mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	if (!is_kernel_addr(ea)) {
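		/*
		 * Remember which slot was used so the next context switch
		 * can invalidate just the cached entries; on overflow,
		 * force a full-table scan instead.
		 */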
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}

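/*
 * Allocate a segment table entry for @ea in the current task's
 * address space.
 */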
int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset = __get_cpu_var(stab_cache_ptr);
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

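	/*
	 * A cache pointer past NR_STAB_CACHE_ENTRIES means the cache
	 * overflowed; fall back to scanning the whole table.
	 */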
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;
			ea = ste->esid_data & ESID_MASK;
			if (!is_kernel_addr(ea)) {
				ste->esid_data = 0;
			}
		}
	}

	asm volatile("sync; slbia; sync":::"memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs.  These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void __init stabs_alloc(void)
{
	int cpu;

	if (cpu_has_feature(CPU_FTR_SLB))
		return;

	for_each_possible_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue; /* stab for CPU 0 is statically allocated */

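		/*
		 * Allocate below 1 << SID_SHIFT (256MB) so the table
		 * lands in the bolted first kernel segment.
		 */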
		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
					 1<<SID_SHIFT);
		newstab = (unsigned long)__va(newstab);

		memset((void *)newstab, 0, HW_PAGE_SIZE);

		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = virt_to_abs(newstab);
		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
		       "virtual, 0x%llx absolute\n",
		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
	}
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
	unsigned long stabreal;

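	/* Flush stale segment lookaside state before bolting the kernel entry */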
	asm volatile("isync; slbia; isync":::"memory");
	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

	/* Order update */
	asm volatile("sync":::"memory");

	/* Set the ASR: segment table real address, with the low valid bit */
	stabreal = get_paca()->stab_real | 0x1ul;

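	/* On legacy iSeries the hypervisor owns the ASR, so set it via HvCall */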
#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		HvCall1(HvCallBaseSetASR, stabreal);
		return;
	}
#endif /* CONFIG_PPC_ISERIES */

	mtspr(SPRN_ASR, stabreal);
}