/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

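/*
 * Allocate a context id above 0. ida_pre_get() may sleep to allocate
 * memory, so it is called outside the spinlock; ida_get_new_above()
 * can still return -EAGAIN if a racing allocation consumed the
 * preallocated node, in which case we simply retry. An id beyond
 * MAX_USER_CONTEXT is out of range, so it is handed straight back.
 */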
int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_USER_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);
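
/*
 * Radix: hook the new mm's page-table root into this context's slot
 * in the process table.
 */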
static int radix__init_new_context(struct mm_struct *mm, int index)
{
	unsigned long rts_field;

	/*
	 * Set the process table entry: the radix tree size, the physical
	 * address of the page-table root, and the size of the root index.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
	return 0;
}
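
/*
 * Called when a new mm is set up: allocate a context id and
 * initialise the MMU-model-specific parts of mm->context.
 */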
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	if (radix_enabled()) {
		radix__init_new_context(mm, index);
	} else {

		/*
		 * The old code would re-promote on fork; we don't do that
		 * when using slices, as it could cause problems promoting
		 * slices that have been forced down to 4K.
		 *
		 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
		 * explicitly against context.id == 0. This ensures that we
		 * properly initialize context slice details for newly allocated
		 * mm's (which will have id == 0) and don't alter context slice
		 * inherited via fork (which will have id != 0).
		 *
		 * We should not be calling init_new_context() on init_mm. Hence a
		 * check against 0 is ok.
		 */
		if (mm->context.id == 0)
			slice_set_user_psize(mm, mmu_virtual_psize);
		subpage_prot_init_new_context(mm);
	}
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
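	/* The lock guarding this context's coprocessor (icswx) state. */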
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
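	/* No page-table page has been carved into PTE fragments yet. */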
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	return 0;
}

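/* Give a context id back to the allocator. */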
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
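/*
 * mm->context.pte_frag points at the next free PTE fragment in the
 * current page-table page, so its offset within the page says how many
 * fragments have already been handed out. Drop the references held for
 * the fragments that were never used, and free the page if that was
 * the last reference.
 */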
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

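/*
 * Undo init_new_context(): release icswx state, the process table
 * entry (radix) or subpage protection state (hash), any partially
 * used PTE-fragment page, and finally the context id itself.
 */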
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	if (radix_enabled())
		process_tb[mm->context.id].prtb1 = 0;
	else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

#ifdef CONFIG_PPC_RADIX_MMU
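/*
 * On radix a context switch only has to move the next context id into
 * the PID SPR; translations are then found via the process table. The
 * surrounding isyncs order the mtspr, and PPC_SLBIA(0x7) invalidates
 * implementation-specific lookaside state (the ERAT) so stale
 * translations from the previous context are discarded.
 */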
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	asm volatile("isync": : :"memory");
	mtspr(SPRN_PID, next->context.id);
	asm volatile("isync \n"
		     PPC_SLBIA(0x7)
		     : : :"memory");
}
#endif