Commit 29cb3cd2 authored by Russell King

ARM: pm: allow suspend finisher to return error codes

There are SoCs where attempting to enter a low power state is ignored,
and the CPU continues executing instructions with all state preserved.
It is over-complex at that point to disable the MMU just to call the
resume path.

Instead, allow the suspend finisher to return error codes to abort
suspend in this circumstance, where the cpu_suspend internals will then
unwind the saved state on the stack.  Also omit the tlb flush as no
changes to the page tables will have happened.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent cbe26349
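
The new contract is easiest to see from the finisher's side. A minimal sketch, assuming a hypothetical SoC whose low-power request can be silently ignored (the mysoc_* names are illustrative, not part of this commit):

	static int mysoc_finish_suspend(unsigned long arg)
	{
		mysoc_request_low_power();	/* hypothetical: arm the power controller */
		cpu_do_idle();			/* wfi - may fall straight through */
		/* still executing: the request was ignored, so abort the suspend */
		return -EBUSY;
	}

If the low power state is entered, the finisher never returns: the CPU wakes up through cpu_resume() and cpu_suspend() hands back zero.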
@@ -10,12 +10,13 @@ extern void cpu_resume(void);
  * Hide the first two arguments to __cpu_suspend - these are an implementation
  * detail which platform code shouldn't have to know about.
  */
-static inline void cpu_suspend(unsigned long arg, void (*fn)(unsigned long))
+static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-	extern void __cpu_suspend(int, long, unsigned long,
-				  void (*)(unsigned long));
-	__cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
+	extern int __cpu_suspend(int, long, unsigned long,
+				 int (*)(unsigned long));
+	int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
 	flush_tlb_all();
+	return ret;
 }
 
 #endif
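
With cpu_suspend() retyped as above, platform code can check the result instead of assuming the suspend attempt succeeded. A caller-side sketch (the mysoc_* names are hypothetical):

	static int mysoc_pm_enter(suspend_state_t state)
	{
		int ret = cpu_suspend(0, mysoc_finish_suspend);
		if (ret)
			pr_warn("suspend finisher aborted: %d\n", ret);
		return ret;
	}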
@@ -12,7 +12,6 @@
  * r1 = v:p offset
  * r2 = suspend function arg0
  * r3 = suspend function
- * Note: does not return until system resumes
  */
 ENTRY(__cpu_suspend)
 	stmfd	sp!, {r4 - r11, lr}
@@ -26,7 +25,7 @@ ENTRY(__cpu_suspend)
 #endif
 	mov	r6, sp			@ current virtual SP
 	sub	sp, sp, r5		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
+	mov	r0, sp			@ save pointer to CPU save block
 	add	ip, ip, r1		@ convert resume fn to phys
 	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
 	ldr	r5, =sleep_save_sp
@@ -55,10 +54,17 @@ ENTRY(__cpu_suspend)
 #else
 	bl	__cpuc_flush_kern_all
 #endif
+	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
 ENDPROC(__cpu_suspend)
 	.ltorg
+
+cpu_suspend_abort:
+	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	mov	sp, r2
+	ldmfd	sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
 
 /*
  * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
@@ -89,6 +95,7 @@ cpu_resume_after_mmu:
 	str	r5, [r2, r4, lsl #2]	@ restore old mapping
 	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
 	bl	cpu_init		@ restore the und/abt/irq banked regs
+	mov	r0, #0			@ return zero on success
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_resume_after_mmu)
......
@@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = {
 	SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL),
 };
 
-void exynos4_cpu_suspend(unsigned long arg)
+static int exynos4_cpu_suspend(unsigned long arg)
 {
 	unsigned long tmp;
 	unsigned long mask = 0xFFFFFFFF;
......
@@ -321,9 +321,10 @@ static void omap34xx_save_context(u32 *save)
 	*save++ = val;
 }
 
-static void omap34xx_do_sram_idle(unsigned long save_state)
+static int omap34xx_do_sram_idle(unsigned long save_state)
 {
 	omap34xx_cpu_suspend(save_state);
+	return 0;
 }
 
 void omap_sram_idle(void)
......
@@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns {
 extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;
 
 /* sleep.S */
-extern void pxa25x_finish_suspend(unsigned long);
-extern void pxa27x_finish_suspend(unsigned long);
+extern int pxa25x_finish_suspend(unsigned long);
+extern int pxa27x_finish_suspend(unsigned long);
 
 extern int pxa_pm_enter(suspend_state_t state);
 extern int pxa_pm_prepare(void);
......
@@ -148,7 +148,7 @@ static void pxa3xx_cpu_pm_suspend(void)
 	asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
 #endif
 
-	extern void pxa3xx_finish_suspend(unsigned long);
+	extern int pxa3xx_finish_suspend(unsigned long);
 
 	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
 	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
......
@@ -37,7 +37,7 @@
 extern void s3c2412_sleep_enter(void);
 
-static void s3c2412_cpu_suspend(unsigned long arg)
+static int s3c2412_cpu_suspend(unsigned long arg)
 {
 	unsigned long tmp;
@@ -48,6 +48,8 @@ static void s3c2412_cpu_suspend(unsigned long arg)
 	__raw_writel(tmp, S3C2412_PWRCFG);
 
 	s3c2412_sleep_enter();
+
+	panic("sleep resumed to originator?");
 }
 
 static void s3c2412_pm_prepare(void)
......
@@ -24,7 +24,7 @@
 extern void s3c2412_sleep_enter(void);
 
-static void s3c2416_cpu_suspend(unsigned long arg)
+static int s3c2416_cpu_suspend(unsigned long arg)
 {
 	/* enable wakeup sources regardless of battery state */
 	__raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG);
@@ -33,6 +33,8 @@ static void s3c2416_cpu_suspend(unsigned long arg)
 	__raw_writel(0x2BED, S3C2443_PWRMODE);
 
 	s3c2412_sleep_enter();
+
+	panic("sleep resumed to originator?");
 }
 
 static void s3c2416_pm_prepare(void)
......
@@ -112,7 +112,7 @@ void s3c_pm_save_core(void)
  * this.
  */
 
-static void s3c64xx_cpu_suspend(unsigned long arg)
+static int s3c64xx_cpu_suspend(unsigned long arg)
 {
 	unsigned long tmp;
......
@@ -33,7 +33,7 @@
 #include <asm/system.h>
 #include <asm/mach/time.h>
 
-extern void sa1100_finish_suspend(unsigned long);
+extern int sa1100_finish_suspend(unsigned long);
 
 #define SAVE(x)		sleep_save[SLEEP_SAVE_##x] = x
 #define RESTORE(x)	x = sleep_save[SLEEP_SAVE_##x]
......
@@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow;
 /* per-cpu sleep functions */
 
 extern void (*pm_cpu_prep)(void);
-extern void (*pm_cpu_sleep)(unsigned long);
+extern int (*pm_cpu_sleep)(unsigned long);
 
 /* Flags for PM Control */
@@ -54,7 +54,7 @@ extern unsigned char pm_uart_udivslot;	/* true to save UART UDIVSLOT */
 
 extern void s3c_cpu_resume(void);
 
-extern void s3c2410_cpu_suspend(unsigned long);
+extern int s3c2410_cpu_suspend(unsigned long);
 
 /* sleep save info */
......
@@ -232,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start,
 
 void (*pm_cpu_prep)(void);
-void (*pm_cpu_sleep)(unsigned long);
+int (*pm_cpu_sleep)(unsigned long);
 
 #define any_allowed(mask, allow)	(((mask) & (allow)) != (allow))
......
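
With pm_cpu_sleep retyped, the Samsung PM core can hand the pointer straight to cpu_suspend() and forward the finisher's verdict. A hypothetical sketch (only cpu_suspend and pm_cpu_sleep come from this diff; the wrapper name is illustrative):

	static int s3c_pm_do_sleep(void)
	{
		/* pm_cpu_sleep now matches cpu_suspend's finisher type */
		return cpu_suspend(0, pm_cpu_sleep);
	}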