/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

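/*
 * Allocate a context id from the shared IDA, constrained to the range
 * [min_id, max_id]. ida_get_new_above() returns -EAGAIN when it needs
 * more memory, so we preload with ida_pre_get() and retry until it
 * either succeeds or fails for real. Returns the new id, or a negative
 * errno.
 */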
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}

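/*
 * Reserve a specific context id. Passing "id" as the minimum to
 * ida_get_new_above() means we get "id" back only if it is still free;
 * if the IDA hands us anything else, the id was already taken and we
 * warn rather than fail hard.
 */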
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

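/*
 * The number of available user contexts depends on the size of the
 * virtual address space the MMU supports: machines with 68-bit VAs get
 * the full range, older 65-bit-VA machines get a smaller one.
 */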
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

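/*
 * Hash flavour of context initialisation: allocate a context id, then
 * set up the slice (segment page size) and subpage protection state
 * for the new mm.
 */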
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * We do switch_slb() early in fork, even before we set up
	 * mm->context.addr_limit. Default to the max task size so that we
	 * copy the default values to the paca, which helps us handle SLB
	 * misses early.
	 */
	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork; we don't do that when using
	 * slices, as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mms (which
	 * will have id == 0) and don't alter context slices inherited via
	 * fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}

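/*
 * On radix the context id doubles as the hardware PID. Allocate one
 * between mmu_base_pid and the limit implied by mmu_pid_bits, then
 * hook the mm's page table into the process table so the MMU can
 * find it.
 */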
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set up the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}

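/*
 * Arch hook invoked when a new mm is created: pick the radix or hash
 * path to allocate a context id, then initialise the per-mm MMU state
 * that hangs off it.
 */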
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	return 0;
}

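/* Hand a context id back to the allocator. */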
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
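/*
 * With 64K pages a single page backs PTE_FRAG_NR page table fragments,
 * and the low bits of mm->context.pte_frag encode how many fragments
 * have been handed out so far. Drop the references still held for the
 * unused fragments and free the page if that was the last reference.
 */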
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
}
#endif

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However, we know that at least the P9
		 * implementation will avoid caching an entry with an invalid
		 * RTS field, and 0 is invalid. So this will do.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	} else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

#ifdef CONFIG_PPC_RADIX_MMU
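/*
 * Switching the radix context is just an update of the PID register.
 * POWER9 DD1 additionally needs isync barriers around the mtspr plus
 * an ERAT invalidation, as a workaround for that chip revision.
 */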
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif