/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

/*
 * Save the floating point registers to *fpregs.
 * f0, f2, f4 and f6 are always stored; the remaining registers and the
 * fp control register are stored only if the machine has the IEEE
 * floating point extension (MACHINE_HAS_IEEE).
 */
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,%O0+8(%R0)\n"
		"	std	2,%O0+24(%R0)\n"
		"	std	4,%O0+40(%R0)\n"
		"	std	6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	%0\n"
		"	std	1,%O0+16(%R0)\n"
		"	std	3,%O0+32(%R0)\n"
		"	std	5,%O0+48(%R0)\n"
		"	std	7,%O0+64(%R0)\n"
		"	std	8,%O0+72(%R0)\n"
		"	std	9,%O0+80(%R0)\n"
		"	std	10,%O0+88(%R0)\n"
		"	std	11,%O0+96(%R0)\n"
		"	std	12,%O0+104(%R0)\n"
		"	std	13,%O0+112(%R0)\n"
		"	std	14,%O0+120(%R0)\n"
		"	std	15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

/*
 * Restore the floating point registers from *fpregs.
 * Counterpart of save_fp_regs: f0, f2, f4 and f6 are always loaded;
 * the remaining registers and the fp control register only if the
 * machine has the IEEE floating point extension (MACHINE_HAS_IEEE).
 */
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,%O0+8(%R0)\n"
		"	ld	2,%O0+24(%R0)\n"
		"	ld	4,%O0+40(%R0)\n"
		"	ld	6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	%0\n"
		"	ld	1,%O0+16(%R0)\n"
		"	ld	3,%O0+32(%R0)\n"
		"	ld	5,%O0+48(%R0)\n"
		"	ld	7,%O0+64(%R0)\n"
		"	ld	8,%O0+72(%R0)\n"
		"	ld	9,%O0+80(%R0)\n"
		"	ld	10,%O0+88(%R0)\n"
		"	ld	11,%O0+96(%R0)\n"
		"	ld	12,%O0+104(%R0)\n"
		"	ld	13,%O0+112(%R0)\n"
		"	ld	14,%O0+120(%R0)\n"
		"	ld	15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
82
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
L
Linus Torvalds 已提交
83 84 85 86
}

static inline void restore_access_regs(unsigned int *acrs)
{
87
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
L
Linus Torvalds 已提交
88 89
}

/*
 * Context switch: save fp/access registers of the previous task and
 * restore those of the next one, then transfer control via __switch_to.
 * Tasks without an mm skip the register save/restore.
 */
#define switch_to(prev,next,last) do {					\
	if (prev->mm) {							\
		save_fp_regs(&prev->thread.fp_regs);			\
		save_access_regs(&prev->thread.acrs[0]);		\
	}								\
	if (next->mm) {							\
		restore_fp_regs(&next->thread.fp_regs);			\
		restore_access_regs(&next->thread.acrs[0]);		\
		update_per_regs(next);					\
	}								\
	prev = __switch_to(prev,next);					\
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
H
Heiko Carstens 已提交
106 107 108 109 110 111 112 113
/*
 * Pseudo-page-fault (pfault) interface.  Without CONFIG_PFAULT,
 * pfault_init() reports failure (-1) and pfault_fini() is a no-op.
 */
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);
/*
 * Finish a task switch: restore the address space mode of the incoming
 * task and account the elapsed virtual cpu time to the outgoing one.
 */
#define finish_arch_switch(prev) do {					     \
	set_fs(current->thread.mm_segment);				     \
	account_vtime(prev, current);					     \
} while (0)

#define nop() asm volatile("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that is
 * does a checkpoint syncronisation & makes sure that 
 * all memory ops have completed wrt other CPU's ( see 7-15 POP  DJB ).
 */

/* bcr 15,0 acts as a serialization point; all barriers map to it. */
#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()


#define set_mb(var, value)      do { var = value; mb(); } while (0)

#ifdef __s390x__

/* Load control registers low..high from memory (64-bit lctlg). */
#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

/* Store control registers low..high to memory (64-bit stctg). */
#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#else /* __s390x__ */

/* Load control registers low..high from memory (31-bit lctl). */
#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
})

/* Store control registers low..high to memory (31-bit stctl). */
#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#endif /* __s390x__ */

/*
 * Read-modify-write a single bit of control register "cr" on the
 * local cpu via __ctl_store/__ctl_load.
 */
#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})

/*
 * Use to set psw mask except for the first byte which
 * won't be changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	/* Keep the current top byte (from arch_local_save_flags),
	   replace everything else with the given mask. */
	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

/* Enable/disable machine-check interruptions (PSW_MASK_MCHECK) locally. */
#define local_mcck_enable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)

/* On SMP, control register bits are changed on all cpus via the
 * smp_ctl_* helpers; on UP the local __ctl_* macros suffice. */
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */

236 237 238 239 240 241
/*
 * The test_facility function uses the bit odering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
H
Heiko Carstens 已提交
242
{
243
	unsigned char *ptr;
H
Heiko Carstens 已提交
244

245 246 247 248
	if (nr >= MAX_FACILITY_BIT)
		return 0;
	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
	return (*ptr & (0x80 >> (nr & 7))) != 0;
H
Heiko Carstens 已提交
249 250
}

251 252 253 254 255 256 257 258
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

265 266 267 268 269 270 271 272 273 274 275 276 277 278
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

#endif /* __KERNEL__ */

#endif