/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/fpu-internal.h>
#include <asm/vx-insn.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP)
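
#
# Common register usage in the first level interrupt handlers below:
#   %r8,%r9 - old PSW (asynchronous and program check handlers)
#   %r10    - last breaking-event address (__LC_LAST_BREAK)
#   %r11    - pointer to the pt_regs area on the stack
#   %r12    - thread_info of the current task (__LC_THREAD_INFO)
#   %r13    - address of cleanup_critical (asynchronous and pgm handlers)
#   %r15    - stack pointer
#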
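# %r13 holds the address of cleanup_critical in the handlers that use this
# macro, so BASED(name) addresses "name" relative to %r13.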
#define BASED(name) name-cleanup_critical(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

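#
# If the interrupt hit in user space, update the CPU timer based accounting
# and the last-break address; if it hit inside the kernel critical section,
# let cleanup_critical repair the state first. Then switch to the async
# stack unless the interrupt already arrived on it. On exit %r11 points to
# the pt_regs area at STACK_FRAME_OVERHEAD(%r15).
#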
	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	2f
	CHECK_STACK 1<<STACK_SHIFT,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
1:	LAST_BREAK %r14
	UPDATE_VTIME %r14,%r15,\timer
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

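#
# The CPU timer counts down: EXIT_TIMER - \enter_timer is the time spent in
# user space since the last exit to user space and is added to the user
# time; LAST_UPDATE_TIMER - EXIT_TIMER is the kernel time accumulated before
# that exit and is added to the system time. \enter_timer becomes the new
# LAST_UPDATE_TIMER.
#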
	.macro UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

	.macro	LAST_BREAK scratch
	srag	\scratch,%r10,23
	jz	.+10
	stg	%r10,__TI_last_break(%r12)
	.endm

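# Re-enable interrupts according to the interrupted context: load the system
# mask from the old PSW in %r8, with the PER bit cleared.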
	.macro REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lgr	%r1,%r2
	aghi	%r1,__TASK_thread		# thread_struct of prev task
	lg	%r4,__TASK_thread_info(%r2)	# get thread_info of prev
	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
	lgr	%r1,%r3
	aghi	%r1,__TASK_thread		# thread_struct of next task
	lgr	%r15,%r5
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r5,__LC_THREAD_INFO		# store thread info of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	br	%r14

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
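# From C this is used roughly as int sie64a(struct kvm_s390_sie_block *scb,
# u64 *guest_gprs); the value returned in %r2 is the exit reason code kept
# at __SF_EMPTY+24(%r15), or -EFAULT if a fault is recognized while in SIE
# (see .Lsie_fault below).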
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
	tm	__LC_CPU_FLAGS+7,_CIF_FPU	# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
	jz	.Lsie_enter
	.insn	s,0xb2800000,__SF_EMPTY(%r15)	# set guest id
.Lsie_enter:
	sie	0(%r14)
	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
	jz	.Lsie_skip
	.insn	s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
# See also .Lcleanup_sie
.Lrewind_pad:
	nop	0
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
	br	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	LAST_BREAK %r13
.Lsysc_vtime:
	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call add.
	tm	__TI_flags+7(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	tm	__PT_FLAGS+7(%r11),_PIF_WORK
	jnz	.Lsysc_work
	tm	__TI_flags+7(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	tm	__LC_CPU_FLAGS+7,_CIF_WORK
	jnz	.Lsysc_work
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
#ifdef CONFIG_UPROBES
	tm	__TI_flags+7(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	tm	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	jo	.Lsysc_vxrs
	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
	jo	.Lsysc_uaccess
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# CIF bit will be cleared by handler

#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	tm	__PT_FLAGS+7(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	lghi	%r8,0			# svc 0 returns -ENOSYS
	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
	slag	%r8,%r1,2
	j	.Lsysc_nr_ok		# restart svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,2
	lgf	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	basr	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	tm	__TI_flags+7(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_THREAD_INFO
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	basr	%r14,%r9
	j	.Lsysc_tracenogo

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	0f
	brasl	%r14,.Lcleanup_sie
#endif
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
2:	LAST_BREAK %r14
	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	lg	%r15,__LC_KERNEL_STACK
	lg	%r14,__TI_task(%r12)
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	4f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
4:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lpgm_return
	lgf	%r1,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	tm	__LC_MACHINE_FLAGS+6,0x10	# MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	tm	__TI_flags+7(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	tm	__LC_CPU_FLAGS+7,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	.Lio_restore		# preemption is disabled
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	jo	.Lio_vxrs
	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
	jo	.Lio_uaccess
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# CIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
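# %r2: pointer to the idle data area holding the clock/timer fields
#      (__CLOCK_IDLE_ENTER/__TIMER_IDLE_ENTER), %r3: idle PSW mask.
# The address half of the idle PSW stored at __SF_EMPTY+8(%r15) points just
# past the lpswe, so the CPU resumes at the br %r14 once an interrupt ends
# the wait state.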
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	br	%r14
.Lpsw_idle_end:

/* Store floating-point controls and floating-point or vector extension
 * registers.  A critical section cleanup assures that the registers
 * are stored even if interrupted for some other work.	The register %r2
 * designates a struct fpu to store register contents.	If the specified
 * structure does not contain a register save area, the register store is
 * omitted (see also comments in arch_dup_task_struct()).
 *
 * The CIF_FPU flag is set in any case.  The CIF_FPU triggers a lazy restore
 * of the register contents at system call or io return.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	bor	%r14
	stfpc	__THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
	lg	%r3,__THREAD_FPU_regs(%r2)
	ltgr	%r3,%r3
	jz	.Lsave_fpu_regs_done	  # no save area -> set CIF_FPU
	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
.Lsave_fpu_regs_vx_low:
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
.Lsave_fpu_regs_vx_high:
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
	br	%r14
.Lsave_fpu_regs_end:

/* Load floating-point controls and floating-point or vector extension
 * registers.  A critical section cleanup assures that the register contents
 * are loaded even if interrupted for some other work.	Depending on the saved
 * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4 and __SF_EMPTY+32(%r15)
 */
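# Note: load_fpu_regs is a local helper, reached only via brasl/jg from
# sie64a and from the sysc/io return paths above; it clears CIF_FPU once
# the registers have been loaded.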
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	bnor	%r14
	lfpc	__THREAD_FPU_fpc(%r4)
	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp_ctl		# -> no VX, load FP regs
.Lload_fpu_regs_vx_ctl:
	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
	jo	.Lload_fpu_regs_vx
	oi	__SF_EMPTY+32+5(%r15),2		# set VX control
	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
.Lload_fpu_regs_vx:
	VLM	%v0,%v15,0,%r4
.Lload_fpu_regs_vx_high:
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp_ctl:
	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
	jz	.Lload_fpu_regs_fp
	ni	__SF_EMPTY+32+5(%r15),253	# clear VX control
	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
	br	%r14
.Lload_fpu_regs_end:

/* Test and set the vector enablement control in CR0.46 */
ENTRY(__ctl_set_vx)
	stctg	%c0,%c0,__SF_EMPTY(%r15)
	tm	__SF_EMPTY+5(%r15),2
	bor	%r14
	oi	__SF_EMPTY+5(%r15),2
	lctlg	%c0,%c0,__SF_EMPTY(%r15)
	br	%r14
.L__ctl_set_vx_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	la	%r1,4095		# revalidate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	.Lmcck_panic		# no -> skip cleanup critical
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW

.Lmcck_panic:
	lg	%r15,__LC_PANIC_STACK
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif
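
#
# An interrupt or machine check hit while the PSW address (%r9) was inside
# the .L__critical_start/.L__critical_end range. The cleanup code below
# completes or rewinds the interrupted piece of work and updates %r9 with
# the address at which execution should resume once the handler returns.
#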

cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+112)	# __ctl_set_vx
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+120)	# .L__ctl_set_vx_end
	jl	.Lcleanup___ctl_set_vx
0:	br	%r14

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end
	.quad	__ctl_set_vx
	.quad	.L__ctl_set_vx_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
	jz	0f
	.insn	s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
0:	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	br	%r14
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved registers r10 and r12
	stg	%r10,16(%r11)		# r10 last break
	stg	%r12,32(%r11)		# r12 thread-info pointer
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# do LAST_BREAK
	lg	%r9,16(%r11)
	srag	%r9,%r9,23
	jz	0f
	mvc	__TI_last_break(8,%r12),16(%r11)
0:	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	br	%r14
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	br	%r14

.Lcleanup_sysc_restore:
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	br	%r14

.Lcleanup_io_restore:
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_io_restore_insn:
	.quad	.Lio_done - 4

.Lcleanup_idle:
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	br	%r14
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	bor	%r14
	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
	jhe	5f
	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
	jhe	4f
	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
	jhe	3f
	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
	jhe	2f
	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
	jhe	1f
	lg	%r2,__LC_CURRENT
0:	# Store floating-point controls
	stfpc	__THREAD_FPU_fpc(%r2)
1:	# Load register save area and check if VX is active
	lg	%r3,__THREAD_FPU_regs(%r2)
	ltgr	%r3,%r3
	jz	5f			  # no save area -> set CIF_FPU
	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
	jz	4f			  # no VX -> store FP regs
2:	# Store vector registers (V0-V15)
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
3:	# Store vector registers (V16-V31)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	5f			  # -> done, set CIF_FPU flag
4:	# Store floating-point registers
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
5:	# Set CIF_FPU flag
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
	lg	%r9,48(%r11)		# return from save_fpu_regs
	br	%r14
.Lcleanup_save_fpu_fpc_end:
	.quad	.Lsave_fpu_regs_fpc_end
.Lcleanup_save_fpu_regs_vx_low:
	.quad	.Lsave_fpu_regs_vx_low
.Lcleanup_save_fpu_regs_vx_high:
	.quad	.Lsave_fpu_regs_vx_high
.Lcleanup_save_fpu_regs_fp:
	.quad	.Lsave_fpu_regs_fp
.Lcleanup_save_fpu_regs_done:
	.quad	.Lsave_fpu_regs_done

.Lcleanup_load_fpu_regs:
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	bnor	%r14
	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
	jhe	1f
	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
	jhe	2f
	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl)
	jhe	3f
	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
	jhe	4f
	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
	jhe	5f
	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
	jhe	6f
	lg	%r4,__LC_CURRENT
	lfpc	__THREAD_FPU_fpc(%r4)
	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	3f				# -> no VX, load FP regs
6:	# Set VX-enablement control
	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
	jo	5f
	oi	__SF_EMPTY+32+5(%r15),2		# set VX control
	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
5:	# Load V0 ..V15 registers
	VLM	%v0,%v15,0,%r4
4:	# Load V16..V31 registers
	VLM	%v16,%v31,256,%r4
	j	1f
3:	# Clear VX-enablement control for FP
	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
	jz	2f
	ni	__SF_EMPTY+32+5(%r15),253	# clear VX control
	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
2:	# Load floating-point registers
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
1:	# Clear CIF_FPU bit
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
	lg	%r9,48(%r11)		# return from load_fpu_regs
	br	%r14
.Lcleanup_load_fpu_regs_vx_ctl:
	.quad	.Lload_fpu_regs_vx_ctl
.Lcleanup_load_fpu_regs_vx:
	.quad	.Lload_fpu_regs_vx
.Lcleanup_load_fpu_regs_vx_high:
	.quad	.Lload_fpu_regs_vx_high
.Lcleanup_load_fpu_regs_fp_ctl:
	.quad	.Lload_fpu_regs_fp_ctl
.Lcleanup_load_fpu_regs_fp:
	.quad	.Lload_fpu_regs_fp
.Lcleanup_load_fpu_regs_done:
	.quad	.Lload_fpu_regs_done

.Lcleanup___ctl_set_vx:
	stctg	%c0,%c0,__SF_EMPTY(%r15)
	tm	__SF_EMPTY+5(%r15),2
	bor	%r14
	oi	__SF_EMPTY+5(%r15),2
	lctlg	%c0,%c0,__SF_EMPTY(%r15)
	lg	%r9,48(%r11)		# return from __ctl_set_vx
	br	%r14

L
Linus Torvalds 已提交
1319 1320 1321
/*
 * Integer constants
 */
1322
	.align	8
L
Linus Torvalds 已提交
1323
.Lcritical_start:
1324
	.quad	.L__critical_start
1325
.Lcritical_length:
1326
	.quad	.L__critical_end - .L__critical_start
1327
#if IS_ENABLED(CONFIG_KVM)
1328
.Lsie_critical_start:
1329
	.quad	.Lsie_gmap
1330
.Lsie_critical_length:
1331
	.quad	.Lsie_done - .Lsie_gmap
1332 1333
#endif

H
Heiko Carstens 已提交
1334 1335
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.long esame
1336
	.globl	sys_call_table
L
Linus Torvalds 已提交
1337 1338 1339 1340
sys_call_table:
#include "syscalls.S"
#undef SYSCALL

1341
#ifdef CONFIG_COMPAT
L
Linus Torvalds 已提交
1342

H
Heiko Carstens 已提交
1343
#define SYSCALL(esame,emu)	.long emu
1344
	.globl	sys_call_table_emu
L
Linus Torvalds 已提交
1345 1346 1347 1348
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif