/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
18 19
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
20
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
21
#ifdef CONFIG_64BIT
M
Martin Schwidefsky 已提交
22
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
23
#endif
24
	mm->context.has_pgste = 0;
M
Martin Schwidefsky 已提交
25 26
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
27 28
	return 0;
}

#define destroy_context(mm)             do { } while (0)

/*
 * Pick the load-control instruction matching the control-register width:
 * 32-bit "lctl" vs 64-bit "lctlg".  Used by the inline asm below to load
 * the user ASCE into a control register.
 */
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif

static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
L
Linus Torvalds 已提交
39
{
40 41 42
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
43
	if (s390_user_mode != HOME_SPACE_MODE) {
44 45
		/* Load primary space page table origin. */
		asm volatile(LCTL_OPCODE" 1,1,%0\n"
46
			     : : "m" (S390_lowcore.user_asce) );
47 48 49 50
	} else
		/* Load home space page table origin. */
		asm volatile(LCTL_OPCODE" 13,13,%0"
			     : : "m" (S390_lowcore.user_asce) );
M
Martin Schwidefsky 已提交
51
	set_fs(current->thread.mm_segment);
L
Linus Torvalds 已提交
52 53 54
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
G
Gerald Schaefer 已提交
55
			     struct task_struct *tsk)
L
Linus Torvalds 已提交
56
{
57
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
58
	update_mm(next, tsk);
59 60 61 62
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	atomic_inc(&next->context.attach_count);
	/* Check for TLBs not flushed yet */
63
	__tlb_flush_mm_lazy(next);
L
Linus Torvalds 已提交
64 65
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
L
Linus Torvalds 已提交
70 71 72 73 74
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
}

/*
 * Called after fork duplicates @oldmm into @mm.
 *
 * On 64-bit, the fresh mm starts with the maximal asce_limit; if the
 * parent was running with a smaller one, shrink the child's page-table
 * hierarchy down to match the parent's limit.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (mm->context.asce_limit > oldmm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

/* No architecture-specific work when an mm's mappings are torn down. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */