diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 497e0e4702b7e83c4d56ce54f0475d02506c3316..97864aabc2a6fa2aa7c86df3b19454b7e68c3dda 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -74,7 +74,7 @@ config ARM
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
-	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
@@ -1905,7 +1905,7 @@ config XIP_DEFLATED_DATA
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
 	depends on (!SMP || PM_SLEEP_SMP)
-	depends on !CPU_V7M
+	depends on MMU
 	select KEXEC_CORE
 	help
 	  kexec is a system call that implements the ability to shutdown your
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index a1e883c5e5c4558ba737b1f8fc851289cbca9247..da599c3a1193433207b6e2b2bdd44a6bb8a76647 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -110,12 +110,12 @@ endif
 
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
-nossp_flags := $(call cc-option, -fno-stack-protector)
-CFLAGS_atags_to_fdt.o := $(nossp_flags)
-CFLAGS_fdt.o := $(nossp_flags)
-CFLAGS_fdt_ro.o := $(nossp_flags)
-CFLAGS_fdt_rw.o := $(nossp_flags)
-CFLAGS_fdt_wip.o := $(nossp_flags)
+nossp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
+CFLAGS_atags_to_fdt.o := $(nossp-flags-y)
+CFLAGS_fdt.o := $(nossp-flags-y)
+CFLAGS_fdt_ro.o := $(nossp-flags-y)
+CFLAGS_fdt_rw.o := $(nossp-flags-y)
+CFLAGS_fdt_wip.o := $(nossp-flags-y)
 
 ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index ead21e5f2b8032cb4e39faf1620260f5cd6fa2b4..088b0a0608761d5eff23ff5bfc3dac674bbb6610 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -140,6 +140,17 @@
 #endif
 		.endm
 
+		.macro	enable_cp15_barriers, reg
+		mrc	p15, 0, \reg, c1, c0, 0	@ read SCTLR
+		tst	\reg, #(1 << 5)		@ CP15BEN bit set?
+		bne	.L_\@
+		orr	\reg, \reg, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, \reg, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb						)
+.L_\@:
+		.endm
+
 		.section ".start", "ax"
 /*
  * sort out different calling conventions
@@ -820,6 +831,7 @@ __armv4_mmu_cache_on:
 		mov	pc, r12
 
 __armv7_mmu_cache_on:
+		enable_cp15_barriers	r11
 		mov	r12, lr
 #ifdef CONFIG_MMU
 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
@@ -1209,6 +1221,7 @@ __armv6_mmu_cache_flush:
 		mov	pc, lr
 
 __armv7_mmu_cache_flush:
+		enable_cp15_barriers	r10
 		tst	r4, #1
 		bne	iflush
 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
@@ -1447,21 +1460,7 @@ ENTRY(efi_stub_entry)
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-
-		@ our cache maintenance code relies on CP15 barrier instructions
-		@ but since we arrived here with the MMU and caches configured
-		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
-		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
-		@ the enable path will be executed on v7+ only.
-		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
-		tst	r1, #(1 << 5)		@ CP15BEN bit set?
-		bne	0f
-		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
-		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
- ARM(		.inst	0xf57ff06f		@ v7+ isb	)
- THUMB(		isb						)
-
-0:		bl	cache_clean_flush
+		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 010fa1a35a6836293f6ae0136251d9b41dcd38c9..30fb2330f57b8730f0ea4fef1ea763c5811a5523 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -42,12 +42,6 @@
 
 #define swapper_pg_dir ((pgd_t *) 0)
 
-#define __swp_type(x)		(0)
-#define __swp_offset(x)		(0)
-#define __swp_entry(typ,off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
-
 typedef pte_t *pte_addr_t;
 
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 71778bb0475b31376d88d33eb898732caa4c0588..cc726afea0233d6607324d8f3860fa89f04846f0 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -92,6 +92,8 @@ static int save_trace(struct stackframe *frame, void *d)
 		return 0;
 
 	regs = (struct pt_regs *)frame->sp;
+	if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
+		return 0;
 
 	trace->entries[trace->nr_entries++] = regs->ARM_pc;
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index abb7dd7e656fde025edbb3c88bfee01b7b831759..1e70e7227f0ff2283b07becf1ed543b3482bb280 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
+	unsigned long end = frame + 4 + sizeof(struct pt_regs);
+
 #ifdef CONFIG_KALLSYMS
 	printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
 
-	if (in_entry_text(from))
-		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+	if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
+		dump_mem("", "Exception stack", frame + 4, end);
 }
 
 void dump_backtrace_stm(u32 *stack, u32 instruction)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3ef204137e7325e091c9520d30acb7b1169eabc6..054be44d1cdb44f1e683a5e1c42d3c5a2995ec24 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -324,7 +324,7 @@ static inline void poison_init_mem(void *s, size_t count)
 		*p++ = 0xe7fddef0;
 }
 
-static inline void
+static inline void __init
 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;