diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4176df721bf09bace95bad96d1c194e5b6b7a038..1a0045abead7562be1e27163e0aee3c6afbe9b40 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -253,21 +253,22 @@
 	.endm

 	.macro	restore_user_regs, fast = 0, offset = 0
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	mov	r2, sp
+	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	strex	r1, r2, [r2]			@ clear the exclusive monitor
 #endif
 	.if	\fast
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
 	.else
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
 	.endif
 	mov	r0, r0				@ ARMv5T and earlier require a nop
 						@ after ldm {}^
-	add	sp, sp, #S_FRAME_SIZE - S_PC
+	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f7c65adaa428c9eabd2c5080ae097a374c2206f5..557e128e4df08ce711d4bab89952eae0f1d6d7ea 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
 		ret = 1;
 	}

-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;

 	local64_set(&hwc->prev_count, (u64)-left);

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 715ae19bc7c87302350093b6894251c4519ea957..e55408e965596964ff8c8708dcfec529a559b1bc 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)

 	/*
 	 * Ensure that start/size are aligned to a page boundary.
-	 * Size is appropriately rounded down, start is rounded up.
+	 * Size is rounded down, start is rounded up.
 	 */
-	size -= start & ~PAGE_MASK;
 	aligned_start = PAGE_ALIGN(start);
+	if (aligned_start > start + size)
+		size = 0;
+	else
+		size -= aligned_start - start;

 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
 	if (aligned_start > ULONG_MAX) {
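Two of the hunks above encode arithmetic worth spelling out. First, the perf_event.c clamp: halving max_period trades half the sample-period range for a guarantee that a still-ticking counter cannot overtake the value the overflow handler is about to program. Below is a minimal standalone sketch of that guarantee; `fake_pmu` and `clamp_period` are hypothetical stand-ins for `struct arm_pmu` and the clamp inside `armpmu_event_set_period()`, not kernel code.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the one field of struct arm_pmu we need. */
struct fake_pmu {
	uint64_t max_period;
};

/*
 * Mirrors the clamp the patch adds: never program more than half the
 * counter range, so a counter that wrapped at overflow and kept
 * ticking during interrupt latency cannot overtake the value the
 * handler is about to write.
 */
static int64_t clamp_period(const struct fake_pmu *pmu, int64_t left)
{
	if (left > (int64_t)(pmu->max_period >> 1))
		left = pmu->max_period >> 1;
	return left;
}

int main(void)
{
	struct fake_pmu pmu = { .max_period = 0xffffffffULL };	/* 32-bit counter */
	int64_t left = clamp_period(&pmu, 0xfffffff0LL);

	/*
	 * The hardware counter is programmed with -left (mod 2^32).
	 * Unclamped, -0xfffffff0 = 0x10: a counter that wrapped to 0 at
	 * overflow overtakes 0x10 after a handful of ticks of latency.
	 * Clamped, -0x7fffffff = 0x80000001, which takes ~2^31 ticks to
	 * overtake.
	 */
	printf("left=%#llx -> program %#llx\n",
	       (unsigned long long)left,
	       (unsigned long long)(uint32_t)-left);
	return 0;
}
```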
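Second, the setup.c rounding. Moving `start` up to `aligned_start` discards exactly `aligned_start - start` bytes, so that is what `size` must shrink by; the old `size -= start & ~PAGE_MASK` subtracted start's offset into the page instead, and could underflow when the region was smaller than that offset. A standalone sketch of the corrected arithmetic, assuming 4 KiB pages and re-deriving `PAGE_ALIGN` locally for illustration (`align_region` is a made-up helper, not the kernel function):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/*
 * Mirrors the patched arm_add_memory() arithmetic: round start up to
 * a page boundary, shrink size by the bytes the round-up skipped, and
 * clamp to zero when the whole region sits below the first boundary.
 */
static uint64_t align_region(uint64_t start, uint64_t *size)
{
	uint64_t aligned_start = PAGE_ALIGN(start);

	if (aligned_start > start + *size)
		*size = 0;		/* round-up swallowed the region */
	else
		*size -= aligned_start - start;

	return aligned_start;
}

int main(void)
{
	uint64_t size = 0x100000;	/* 1 MiB, starting 0x100 bytes into a page */
	uint64_t aligned = align_region(0x80000100ULL, &size);

	/*
	 * Prints aligned=0x80001000 size=0xff100: size shrank by 0xf00.
	 * The old code subtracted only 0x100, leaving a bank that
	 * overhung the real region by 0xe00 bytes.
	 */
	printf("aligned=%#llx size=%#llx\n",
	       (unsigned long long)aligned, (unsigned long long)size);
	return 0;
}
```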