#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
	.text

/*
 * Save CPU state for a suspend
 *  r1 = v:p offset
 *  r2 = suspend function arg0
 *  r3 = virtual return function
 * Note: sp is decremented to allocate space for CPU state on stack
 * r0-r3,ip,lr corrupted
 */
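/*
 * Illustrative usage sketch, not part of the original file: the label
 * platform_cpu_resume and the constant expression are hypothetical.
 * A platform suspend path sets up the registers documented above and
 * branches here with bl; once the state is saved, cpu_suspend
 * "returns" to the instruction after the bl with r0 = arg0, and that
 * code then actually enters the low-power state:
 *
 *	ldr	r1, =PHYS_OFFSET - PAGE_OFFSET	@ v:p offset
 *	mov	r2, #0				@ suspend function arg0
 *	ldr	r3, =platform_cpu_resume	@ virtual return function
 *	bl	cpu_suspend
 *	@ r0 = arg0 here; put the hardware to sleep
 */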
ENTRY(cpu_suspend)
	stmfd	sp!, {r3}
	stmfd	sp!, {r4 - r11}
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
#else
	ldr	r5, =cpu_suspend_size
	ldr	ip, =cpu_do_resume
#endif
	mov	r6, sp			@ current virtual SP
	sub	sp, sp, r5		@ allocate CPU state on stack
	mov	r0, sp			@ save pointer
	add	ip, ip, r1		@ convert resume fn to phys
	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
	ldr	r5, =sleep_save_sp
	add	r6, sp, r1		@ convert SP to phys
	stmfd	sp!, {r2, lr}		@ save suspend func arg and pointer
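	/*
	 * Descriptive note: from sp upwards the frame now holds r2
	 * (arg0), lr (caller), r1 (v:p offset), r6 (virtual SP), ip
	 * (phys resume fn), r5 bytes of CPU-specific state, r4-r11,
	 * and r3 (virtual return fn); the phys SP saved below points
	 * at the {r1, r6, ip} triple for cpu_resume to reload.
	 */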
#ifdef CONFIG_SMP
	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
	ALT_UP(mov lr, #0)
	and	lr, lr, #15
	str	r6, [r5, lr, lsl #2]	@ save phys SP
#else
	str	r6, [r5]		@ save phys SP
#endif
#ifdef MULTI_CPU
	mov	lr, pc
	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
	bl	cpu_do_suspend
#endif

	@ flush data cache
#ifdef MULTI_CACHE
	ldr	r10, =cpu_cache
	mov	lr, pc
	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
	bl	__cpuc_flush_kern_all
#endif
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(cpu_suspend)
	.ltorg

/*
 * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
 * r2 = phys page table base
 * r3 = L1 section flags
 */
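/*
 * Descriptive note: a temporary 1:1 section mapping is installed over
 * the section containing cpu_resume_turn_mmu_on so that instruction
 * fetch stays valid while the MMU is switched on; the old page table
 * entry is saved in r5 and restored by cpu_resume_after_mmu once we
 * are running at virtual addresses again.
 */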
ENTRY(cpu_resume_mmu)
	adr	r4, cpu_resume_turn_mmu_on
	mov	r4, r4, lsr #20
	orr	r3, r3, r4, lsl #20
	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
	sub	r2, r2, r1
	ldr	r3, =cpu_resume_after_mmu
	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
	b	cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
	.ltorg
	.align	5
cpu_resume_turn_mmu_on:
	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
	mov	r1, r1			@ nops: give the MMU enable time
	mov	r1, r1			@ to take effect before the jump
	mov	pc, r3			@ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
	str	r5, [r2, r4, lsl #2]	@ restore old mapping
	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)

/*
 * Note: Yes, part of the following code is located in the .data section.
 *       This is to allow sleep_save_sp to be accessed with a relative load
 *       while we can't rely on any MMU translation.  We could have put
 *       sleep_save_sp in the .text section as well, but some setups might
 *       insist on it being truly read-only.
 */
	.data
	.align
ENTRY(cpu_resume)
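	/*
	 * Descriptive note: on SMP the low four bits of the MPIDR,
	 * read from CP15 c0, c0, 5 below, give this CPU's id and
	 * select its slot in the sleep_save_sp array.
	 */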
#ifdef CONFIG_SMP
	adr	r0, sleep_save_sp
	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
	ALT_UP(mov r1, #0)
	and	r1, r1, #15
	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
#else
	ldr	r0, sleep_save_sp	@ stack phys addr
#endif
	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
	@ load v:p, stack, resume fn
  ARM(	ldmia	r0!, {r1, sp, pc}	)
THUMB(	ldmia	r0!, {r1, r2, r3}	)
THUMB(	mov	sp, r2			)
THUMB(	bx	r3			)
ENDPROC(cpu_resume)

sleep_save_sp:
	.rept	CONFIG_NR_CPUS
	.long	0				@ preserve stack phys ptr here
	.endr
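	@ one word per possible CPU: cpu_suspend stores the physical
	@ suspend-frame pointer here and cpu_resume reloads it before
	@ the MMU is on, hence the .data placement noted above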