#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
	.text

/*
 * Save CPU state for a suspend
 *  r1 = v:p offset
 *  r2 = suspend function arg0
 *  r3 = suspend function
 * Note: does not return until system resumes
 */
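/*
 * A minimal calling sketch (the finisher symbol "platform_finisher" is
 * hypothetical; real callers supply their own routine that powers the
 * CPU down):
 *
 *	ldr	r1, =(PHYS_OFFSET - PAGE_OFFSET)  @ v:p offset
 *	mov	r2, #0				  @ finisher arg0
 *	adr	r3, platform_finisher		  @ does the power-down
 *	bl	cpu_suspend			  @ "returns" after resume
 */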
ENTRY(cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}	@ save callee-saved regs and lr
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
#else
	ldr	r5, =cpu_suspend_size
	ldr	ip, =cpu_do_resume
#endif
	mov	r6, sp			@ current virtual SP
	sub	sp, sp, r5		@ allocate CPU state on stack
	mov	r0, sp			@ pointer to CPU state save area
	add	ip, ip, r1		@ convert resume fn to phys
	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
	ldr	r5, =sleep_save_sp
	add	r6, sp, r1		@ convert SP to phys
	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
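	@ the stack now holds, from sp upwards:
	@   [sp, #0]   r2: suspend fn arg0
	@   [sp, #4]   r3: suspend fn
	@   [sp, #8]   r1: v:p offset  (sleep_save_sp gets the phys
	@                  address of this slot, see below)
	@   [sp, #12]  r6: virtual SP
	@   [sp, #16]  ip: phys resume fn
	@   [sp, #20]  CPU-specific state, r5 bytes, written by
	@              cpu_do_suspend via the pointer in r0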
#ifdef CONFIG_SMP
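	@ each CPU has its own sleep_save_sp slot, indexed by the CPU
	@ number in MPIDR bits [3:0]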
	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
	ALT_UP(mov lr, #0)
	and	lr, lr, #15
	str	r6, [r5, lr, lsl #2]	@ save phys SP
#else
	str	r6, [r5]		@ save phys SP
#endif
#ifdef MULTI_CPU
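	@ indirect call, pre-blx style: in ARM state pc reads as the
	@ current instruction + 8, so lr ends up pointing at the
	@ instruction after the ldr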
	mov	lr, pc
	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
	bl	cpu_do_suspend
#endif

	@ flush data cache
#ifdef MULTI_CACHE
	ldr	r10, =cpu_cache
	mov	lr, pc
	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
	bl	__cpuc_flush_kern_all
#endif
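	@ pop the {r2, r3} pair saved above: arg0 lands in r0 and the
	@ suspend fn in pc, i.e. tail-call the routine that powers the
	@ CPU down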
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(cpu_suspend)
	.ltorg

/*
 * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
 * r2 = phys page table base
 * r3 = L1 section flags
 */
ENTRY(cpu_resume_mmu)
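	@ build a temporary 1:1 (phys == virt) section mapping covering
	@ this code so the instruction stream stays valid while the MMU
	@ is switched on; the old entry is restored once we run virtual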
	adr	r4, cpu_resume_turn_mmu_on
	mov	r4, r4, lsr #20		@ section index of the 1:1 code
	orr	r3, r3, r4, lsl #20	@ section base | L1 section flags
	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
	sub	r2, r2, r1		@ convert page table base to virt
	ldr	r3, =cpu_resume_after_mmu
	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
	b	cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
	.ltorg
	.align	5
cpu_resume_turn_mmu_on:
	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
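	@ the CP15 read and the two nops below give the MMU enable time
	@ to take effect before we branch to a virtual address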
	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
	mov	r1, r1			@ nop
	mov	r1, r1			@ nop
	mov	pc, r3			@ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
	str	r5, [r2, r4, lsl #2]	@ restore old mapping
	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
	bl	cpu_init		@ restore the und/abt/irq banked regs
	ldmfd	sp!, {r4 - r11, pc}	@ return to cpu_suspend's caller
ENDPROC(cpu_resume_after_mmu)

/*
 * Note: Yes, part of the following code is located in the .data section.
 *       This is to allow sleep_save_sp to be accessed with a relative load
 *       while we can't rely on any MMU translation.  We could have put
 *       sleep_save_sp in the .text section as well, but some setups might
 *       insist on .text being truly read-only.
 */
	.data
	.align
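/*
 * Resume entry point.  Entered by platform resume code with the MMU
 * still off and running from physical addresses; picks up the state
 * saved by cpu_suspend and jumps to the CPU-specific resume function.
 */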
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
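	@ find this CPU's saved stack pointer, using the CPU number from
	@ MPIDR bits [3:0] as the slot index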
	adr	r0, sleep_save_sp
	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
	ALT_UP(mov r1, #0)
	and	r1, r1, #15
	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
#else
	ldr	r0, sleep_save_sp	@ stack phys addr
#endif
	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
	@ load v:p, stack, resume fn
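	@ (sp may not appear in a Thumb-2 LDM register list, so the
	@  Thumb path loads into r2/r3 and moves them into place)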
  ARM(	ldmia	r0!, {r1, sp, pc}	)
THUMB(	ldmia	r0!, {r1, r2, r3}	)
THUMB(	mov	sp, r2			)
THUMB(	bx	r3			)
ENDPROC(cpu_resume)

sleep_save_sp:
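	@ one saved-SP word per possible CPU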
	.rept	CONFIG_NR_CPUS
	.long	0				@ preserve stack phys ptr here
	.endr