slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

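/*
 * Build the two halves of an SLB entry as expected by slbmte: the
 * ESID word carries the effective segment ID, the valid bit and the
 * entry index, while the VSID word carries the virtual segment ID
 * plus the protection and page-size (LLP) flags.
 */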
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

static inline void create_slbe(unsigned long ea, unsigned long flags,
			       unsigned long entry)
{
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}

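/*
 * Flush the SLB and re-create the bolted vmalloc and kernel stack
 * entries.  slbia leaves entry 0 (the bolted linear mapping) intact,
 * so only slots 1 and 2 are rebuilt here.
 */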
static void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, virtual_llp, lflags, vflags;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | virtual_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
		ksp_esid_data &= ~SLB_ESID_V;

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, 1)),
		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
		        "r"(ksp_esid_data)
		     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

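	/*
	 * If the slb_cache in the PACA has not overflowed, invalidate
	 * just the user segments recorded there; otherwise fall back
	 * to flushing and rebolting the whole SLB.
	 */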
	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = ((unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT) | SLBIE_C;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;
#ifdef CONFIG_PPC_64K_PAGES
	get_paca()->pgdir = mm->pgd;
#endif /* CONFIG_PPC_64K_PAGES */

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

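	/* Kernel addresses are covered by the bolted entries, so only
	 * genuine userspace segments are worth preloading. */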
	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

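/*
 * Patch the immediate operand of an instruction in the SLB miss
 * handler.  This lets slb_initialize() bake the page-size (LLP)
 * encodings for this machine directly into the fast path.
 */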
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}

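/*
 * Set up the SLB for this CPU: patch the miss handler with the
 * segment encodings for the configured page sizes (first call only)
 * and create the bolted kernel entries.
 */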
void slb_initialize(void)
{
	unsigned long linear_llp, virtual_llp;
	static int slb_encoding_inited;
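	/*
	 * These symbols label the instructions to patch in slb_low.S.
	 * They are declared as pointers: this appears to rely on the
	 * 64-bit function-descriptor ABI, where reading a symbol
	 * emitted by _GLOBAL() yields the descriptor's entry address,
	 * i.e. the address of the instruction itself.
	 */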
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_virtual;
	extern unsigned int *slb_miss_user_load_normal;
#ifdef CONFIG_HUGETLB_PAGE
	extern unsigned int *slb_miss_user_load_huge;
	unsigned long huge_llp;

	huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_virtual,
				   SLB_VSID_KERNEL | virtual_llp);
		patch_slb_encoding(slb_miss_user_load_normal,
				   SLB_VSID_USER | virtual_llp);

		DBG("SLB: linear  LLP = %04x\n", linear_llp);
		DBG("SLB: virtual LLP = %04x\n", virtual_llp);
#ifdef CONFIG_HUGETLB_PAGE
		patch_slb_encoding(slb_miss_user_load_huge,
				   SLB_VSID_USER | huge_llp);
		DBG("SLB: huge    LLP = %04x\n", huge_llp);
#endif
	}

	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
 {
	unsigned long lflags, vflags;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | virtual_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_slbe(PAGE_OFFSET, lflags, 0);

	/* VMALLOC space has 4K pages always for now */
	create_slbe(VMALLOC_START, vflags, 1);

	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
 }
#endif /* CONFIG_PPC_ISERIES */

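	/* Start the round-robin replacement pointer just past the bolted
	 * entries so the SLB miss handler never casts them out. */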
	get_paca()->stab_rr = SLB_NUM_BOLTED;
}