/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

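/*
 * Note: "kvm" here is the kernel virtual mapping sequence number, not
 * KVM virtualisation. init_mm.context.kvm_seq is bumped when new kernel
 * mappings are created; __check_kvm_seq() resyncs a stale mm with them.
 */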
void __check_kvm_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_BITS		8
#define ASID_MASK		((~0) << ASID_BITS)
#define ASID_FIRST_VERSION	(1 << ASID_BITS)
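/*
 * With ASID_BITS == 8 the values work out as:
 *	ASID_MASK		== 0xffffff00	(generation/version bits)
 *	ASID_FIRST_VERSION	== 0x00000100	(first non-zero generation)
 * so context.id & ~ASID_MASK is the hardware ASID (0-255) and
 * context.id & ASID_MASK is the allocation generation.
 */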

extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

static inline void check_context(struct mm_struct *mm)
{
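	/*
	 * Illustration (values invented for this comment): if cpu_last_asid
	 * is 0x302 (generation 3) but this mm still carries id 0x205
	 * (generation 2), then (0x205 ^ 0x302) >> 8 == 1, i.e. non-zero,
	 * so a fresh ASID is allocated via __new_context().
	 */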
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);

	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
}

#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
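/*
 * Note: the comma expression above runs __init_new_context() for its
 * side effects and then yields 0, the success value the generic mm
 * code expects from init_new_context().
 */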

#else

static inline void check_context(struct mm_struct *mm)
{
	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
}

#define init_new_context(tsk,mm)	0

#endif

#define destroy_context(mm)		do { } while(0)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
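	/* No-op on ARM: no per-CPU lazy-TLB state needs to be set up. */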
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/*
	 * Check for possible thread migration: if this mm is live on other
	 * CPUs but has never run here, the local I-cache may be stale with
	 * respect to code written on another CPU, so flush it.
	 */
	if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
		__flush_icache_all();
#endif
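	/*
	 * Switch page tables only if this CPU has not run this mm before
	 * (cpu_test_and_set() returns the bit's previous value) or the mm
	 * actually changed; otherwise the current translation is reusable.
	 */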
	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
		check_context(next);
		cpu_switch_mm(next->pgd, next);
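		/*
		 * A VIVT cache was fully flushed by cpu_switch_mm(), so this
		 * CPU no longer holds entries for prev; dropping it from
		 * prev->cpu_vm_mask spares it future maintenance for prev.
		 */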
		if (cache_is_vivt())
			cpu_clear(cpu, prev->cpu_vm_mask);
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif