Commit d5ff835f authored by Linus Torvalds

Merge tag 'arm64-stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64

Pull ARM64 fixes from Catalin Marinas:
 - Remove preempt_count modifications in the arm64 IRQ handling code
   since that's already dealt with in generic irq_enter/irq_exit
 - PTE_PROT_NONE bit moved higher up to avoid overlapping with the
   hardware bits (for PROT_NONE mappings which are pte_present)
 - Big-endian fixes for ptrace support
 - Asynchronous aborts unmasking while in the kernel
 - pgprot_writecombine() change to create Normal NonCacheable memory
   rather than Device GRE
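
A minimal userspace sketch (editor's illustration, not part of the merge) of the PTE_PROT_NONE point in the second bullet: once the bit lives in the software-only range, a PROT_NONE pte can be reported as present while the hardware valid bit stays clear. The pte_present() mirror below is an assumption about the arm64 definition of that era, not a quote of it.

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(UINT64_C(1) << 0)
#define PTE_PROT_NONE	(UINT64_C(1) << 58)	/* software bit, clear of hw attributes */

/* Assumed shape of arm64's pte_present(): PROT_NONE mappings count as
 * present even though the hardware valid bit is clear. */
static int pte_present(uint64_t pte)
{
	return !!(pte & (PTE_VALID | PTE_PROT_NONE));
}

int main(void)
{
	uint64_t pte = PTE_PROT_NONE;	/* a PROT_NONE mapping: !PTE_VALID */

	printf("present=%d valid=%d\n", pte_present(pte), !!(pte & PTE_VALID));
	return 0;	/* prints: present=1 valid=0 */
}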

* tag 'arm64-stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64:
  arm64: Move PTE_PROT_NONE higher up
  arm64: Use Normal NonCacheable memory for writecombine
  arm64: debug: make aarch32 bkpt checking endian clean
  arm64: ptrace: fix compat registers get/set to be endian clean
  arm64: Unmask asynchronous aborts when in kernel mode
  arm64: dts: Reserve the memory used for secondary CPU release address
  arm64: let the core code deal with preempt_count
......
@@ -6,6 +6,8 @@
 /dts-v1/;

+/memreserve/ 0x80000000 0x00010000;
+
 / {
 	model = "Foundation-v8A";
 	compatible = "arm,foundation-aarch64", "arm,vexpress";
......
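
For context on the /memreserve/ above (a sketch, not kernel source): with the spin-table boot method, secondary CPUs poll a release address that lives inside this reserved window, and the boot CPU publishes their entry point there before waking them. The addresses below are stand-ins, not values from the tree.

#include <stdint.h>

/* Hypothetical release address inside the reserved 64KiB at 0x80000000. */
#define CPU_RELEASE_ADDR	0x8000fff8ULL

/* Sketch of the spin-table hand-off: the boot CPU stores the secondary's
 * entry point at the release address (which /memreserve/ keeps the kernel
 * from recycling as ordinary RAM), then wakes the spinning CPU. */
static void release_secondary(volatile uint64_t *release_addr, uint64_t entry)
{
	*release_addr = entry;
	/* on real hardware: clean the cache line to memory, then "sev" */
}

int main(void)
{
	uint64_t fake_slot = 0;	/* stands in for the reserved location */

	release_secondary(&fake_slot, 0x80080000ULL);
	return 0;
}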
......
@@ -56,6 +56,9 @@ static inline void arch_local_irq_disable(void)
 #define local_fiq_enable()	asm("msr	daifclr, #1" : : : "memory")
 #define local_fiq_disable()	asm("msr	daifset, #1" : : : "memory")

+#define local_async_enable()	asm("msr	daifclr, #4" : : : "memory")
+#define local_async_disable()	asm("msr	daifset, #4" : : : "memory")
+
 /*
  * Save the current interrupt enable state.
  */
......
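
The #4 immediate above targets the SError/asynchronous-abort mask. As a reference sketch of the ARMv8 DAIF layout, the 4-bit immediate of msr daifset/daifclr selects:

/* Which DAIF mask each daifset/daifclr immediate bit flips. */
enum daif_imm {
	DAIF_FIQ	= 1 << 0,	/* #1: local_fiq_{enable,disable}   */
	DAIF_IRQ	= 1 << 1,	/* #2: local_irq_{enable,disable}   */
	DAIF_ABORT	= 1 << 2,	/* #4: local_async_{enable,disable} */
	DAIF_DEBUG	= 1 << 3,	/* #8: debug exception mask         */
};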
......
@@ -25,10 +25,11 @@
  * Software defined PTE bits definition.
  */
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 2)	/* only when !PTE_VALID */
-#define PTE_FILE		(_AT(pteval_t, 1) << 3)	/* only when !pte_present() */
+#define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+				/* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */

 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
......
@@ -254,7 +255,7 @@ static inline int has_transparent_hugepage(void)
 #define pgprot_noncached(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define pgprot_writecombine(prot) \
-	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
 #define __HAVE_PHYS_MEM_ACCESS_PROT
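
What __pgprot_modify() does for the lines above, modelled in userspace: swap only the MAIR attribute-index field (bits 2-4 on arm64) while leaving every other prot bit untouched. The MT_* slot numbers below are assumptions for the demo, not the architected MAIR indices.

#include <assert.h>
#include <stdint.h>

#define PTE_ATTRINDX_SHIFT	2
#define PTE_ATTRINDX_MASK	(UINT64_C(7) << PTE_ATTRINDX_SHIFT)
#define PTE_ATTRINDX(t)		((uint64_t)(t) << PTE_ATTRINDX_SHIFT)

#define MT_DEVICE_GRE	1	/* assumed slot numbers */
#define MT_NORMAL_NC	2

/* Userspace mirror of __pgprot_modify(prot, mask, bits). */
static uint64_t pgprot_modify(uint64_t prot, uint64_t mask, uint64_t bits)
{
	return (prot & ~mask) | bits;
}

int main(void)
{
	uint64_t prot = PTE_ATTRINDX(MT_DEVICE_GRE) | 1;	/* plus an unrelated bit */
	uint64_t wc = pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC));

	assert((wc & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC));
	assert((wc & ~PTE_ATTRINDX_MASK) == (prot & ~PTE_ATTRINDX_MASK));
	return 0;
}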
......
@@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

 /*
  * Encode and decode a swap entry:
- *	bits 0, 2:	present (must both be zero)
- *	bit  3:		PTE_FILE
- *	bits 4-8:	swap type
- *	bits 9-63:	swap offset
+ *	bits 0-1:	present (must be zero)
+ *	bit  2:		PTE_FILE
+ *	bits 3-8:	swap type
+ *	bits 9-57:	swap offset
  */
-#define __SWP_TYPE_SHIFT	4
+#define __SWP_TYPE_SHIFT	3
 #define __SWP_TYPE_BITS		6
+#define __SWP_OFFSET_BITS	49
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

 #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
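
A standalone round-trip check of the new encoding (editor's sketch; the names drop the kernel's leading underscores but mirror the macros shown above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT   3
#define SWP_TYPE_BITS    6
#define SWP_OFFSET_BITS  49
#define SWP_TYPE_MASK    ((1 << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)
#define SWP_OFFSET_MASK  ((UINT64_C(1) << SWP_OFFSET_BITS) - 1)

static uint64_t swp_entry(unsigned type, uint64_t offset)
{
	return ((uint64_t)type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);
}

int main(void)
{
	uint64_t e = swp_entry(5, 123456);

	/* The decode masks keep type and offset inside bits 3-8 and 9-57,
	 * so bit 58 (the relocated PTE_PROT_NONE) is never consumed. */
	assert(((e >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == 5);
	assert(((e >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == 123456);
	printf("round trip ok\n");
	return 0;
}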
......
@@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

 /*
  * Encode and decode a file entry:
- *	bits 0, 2:	present (must both be zero)
- *	bit  3:		PTE_FILE
- *	bits 4-63:	file offset / PAGE_SIZE
+ *	bits 0-1:	present (must be zero)
+ *	bit  2:		PTE_FILE
+ *	bits 3-57:	file offset / PAGE_SIZE
  */
 #define pte_file(pte)		(pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x)		(pte_val(x) >> 4)
-#define pgoff_to_pte(x)		__pte(((x) << 4) | PTE_FILE)
+#define pte_to_pgoff(x)		(pte_val(x) >> 3)
+#define pgoff_to_pte(x)		__pte(((x) << 3) | PTE_FILE)

-#define PTE_FILE_MAX_BITS	60
+#define PTE_FILE_MAX_BITS	55

 extern int kern_addr_valid(unsigned long addr);
......
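
The same style of check for the file-pte side: with PTE_FILE at bit 2 and the offset shifted by 3, the 55 usable offset bits top out at bit 57, just under the relocated PTE_PROT_NONE (a sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

#define PTE_FILE	(UINT64_C(1) << 2)

static uint64_t pgoff_to_pte(uint64_t pgoff) { return (pgoff << 3) | PTE_FILE; }
static uint64_t pte_to_pgoff(uint64_t pte)   { return pte >> 3; }

int main(void)
{
	/* Largest offset PTE_FILE_MAX_BITS (55) allows: fills bits 3-57. */
	uint64_t max_pgoff = (UINT64_C(1) << 55) - 1;

	assert(pte_to_pgoff(pgoff_to_pte(max_pgoff)) == max_pgoff);
	return 0;
}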
......
@@ -248,7 +248,8 @@ static int brk_handler(unsigned long addr, unsigned int esr,
 int aarch32_break_handler(struct pt_regs *regs)
 {
 	siginfo_t info;
-	unsigned int instr;
+	u32 arm_instr;
+	u16 thumb_instr;
 	bool bp = false;
 	void __user *pc = (void __user *)instruction_pointer(regs);
......
@@ -257,18 +258,21 @@ int aarch32_break_handler(struct pt_regs *regs)
 	if (compat_thumb_mode(regs)) {
 		/* get 16-bit Thumb instruction */
-		get_user(instr, (u16 __user *)pc);
-		if (instr == AARCH32_BREAK_THUMB2_LO) {
+		get_user(thumb_instr, (u16 __user *)pc);
+		thumb_instr = le16_to_cpu(thumb_instr);
+		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
 			/* get second half of 32-bit Thumb-2 instruction */
-			get_user(instr, (u16 __user *)(pc + 2));
-			bp = instr == AARCH32_BREAK_THUMB2_HI;
+			get_user(thumb_instr, (u16 __user *)(pc + 2));
+			thumb_instr = le16_to_cpu(thumb_instr);
+			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
 		} else {
-			bp = instr == AARCH32_BREAK_THUMB;
+			bp = thumb_instr == AARCH32_BREAK_THUMB;
 		}
 	} else {
 		/* 32-bit ARM instruction */
-		get_user(instr, (u32 __user *)pc);
-		bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+		get_user(arm_instr, (u32 __user *)pc);
+		arm_instr = le32_to_cpu(arm_instr);
+		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
 	}

 	if (!bp)
......
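
The point of the le16_to_cpu()/le32_to_cpu() calls: AArch32 instructions sit in user memory in little-endian order even when the kernel itself runs big-endian, so the raw load must be normalised before comparing opcodes. A userspace sketch using glibc's le16toh() in place of le16_to_cpu(); the opcode value is illustrative:

#include <endian.h>	/* le16toh(): glibc stand-in for the kernel's le16_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 16-bit Thumb opcode as it sits in user memory: always
	 * little-endian, whatever endianness the host runs with. */
	const uint8_t user_mem[2] = { 0x01, 0xde };	/* illustrative opcode 0xde01 */
	uint16_t instr;

	memcpy(&instr, user_mem, sizeof(instr));	/* raw load, like get_user() */
	instr = le16toh(instr);				/* no-op on LE, byte swap on BE */

	printf("opcode = 0x%04x\n", instr);		/* 0xde01 on either host */
	return 0;
}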
......
@@ -309,15 +309,12 @@ el1_irq:
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
-#ifdef CONFIG_PREEMPT
-	get_thread_info tsk
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	w0, w24, #1			// increment it
-	str	w0, [tsk, #TI_PREEMPT]
-#endif
+
 	irq_handler
+
 #ifdef CONFIG_PREEMPT
-	str	w24, [tsk, #TI_PREEMPT]		// restore preempt count
+	get_thread_info tsk
+	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
 	cbnz	w24, 1f				// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
......
@@ -507,22 +504,10 @@ el0_irq_naked:
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	w23, w24, #1			// increment it
-	str	w23, [tsk, #TI_PREEMPT]
-#endif
+
 	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	w0, [tsk, #TI_PREEMPT]
-	str	w24, [tsk, #TI_PREEMPT]
-	cmp	w0, w23
-	b.eq	1f
-	mov	x1, #0
-	str	x1, [x1]			// BUG
-1:
-#endif
+	get_thread_info tsk
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
 #endif
......
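
Why the hand-rolled TI_PREEMPT arithmetic could go: the generic IRQ path already brackets the handler with irq_enter()/irq_exit(), which move preempt_count through the hardirq bits. A toy userspace model of that bookkeeping (HARDIRQ_OFFSET matches the generic kernel layout; everything else is simplified):

#include <assert.h>

#define HARDIRQ_OFFSET	(1 << 16)	/* hardirq count lives above the low bits */

static int preempt_count;

static void irq_enter(void) { preempt_count += HARDIRQ_OFFSET; }
static void irq_exit(void)  { preempt_count -= HARDIRQ_OFFSET; }

static void irq_handler(void)
{
	assert(preempt_count >= HARDIRQ_OFFSET);	/* i.e. in_irq() is true */
}

int main(void)
{
	irq_enter();
	irq_handler();
	irq_exit();
	assert(preempt_count == 0);	/* balanced with no help from entry.S */
	return 0;
}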
......
@@ -636,28 +636,27 @@ static int compat_gpr_get(struct task_struct *target,

 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;

 		switch (idx) {
 		case 15:
-			reg = (void *)&task_pt_regs(target)->pc;
+			reg = task_pt_regs(target)->pc;
 			break;
 		case 16:
-			reg = (void *)&task_pt_regs(target)->pstate;
+			reg = task_pt_regs(target)->pstate;
 			break;
 		case 17:
-			reg = (void *)&task_pt_regs(target)->orig_x0;
+			reg = task_pt_regs(target)->orig_x0;
 			break;
 		default:
-			reg = (void *)&task_pt_regs(target)->regs[idx];
+			reg = task_pt_regs(target)->regs[idx];
 		}

-		ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
+		ret = copy_to_user(ubuf, &reg, sizeof(reg));
 		if (ret)
 			break;
-		else
-			ubuf += sizeof(compat_ulong_t);
+		ubuf += sizeof(reg);
 	}

 	return ret;
......
@@ -685,28 +684,28 @@ static int compat_gpr_set(struct task_struct *target,

 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;
+
+		ret = copy_from_user(&reg, ubuf, sizeof(reg));
+		if (ret)
+			return ret;
+
+		ubuf += sizeof(reg);

 		switch (idx) {
 		case 15:
-			reg = (void *)&newregs.pc;
+			newregs.pc = reg;
 			break;
 		case 16:
-			reg = (void *)&newregs.pstate;
+			newregs.pstate = reg;
 			break;
 		case 17:
-			reg = (void *)&newregs.orig_x0;
+			newregs.orig_x0 = reg;
 			break;
 		default:
-			reg = (void *)&newregs.regs[idx];
+			newregs.regs[idx] = reg;
 		}
-
-		ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-		if (ret)
-			goto out;
-		else
-			ubuf += sizeof(compat_ulong_t);
 	}

 	if (valid_user_regs(&newregs.user_regs))
......
@@ -714,7 +713,6 @@ static int compat_gpr_set(struct task_struct *target,
 	else
 		ret = -EINVAL;

-out:
 	return ret;
 }
......
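
The endian bug the compat_gpr_{get,set} rewrite fixes, reduced to userspace: the old code aliased a 64-bit pt_regs slot through a 32-bit-sized copy, which picks up the high half on a big-endian kernel, whereas assigning through a compat_ulong_t local truncates to the low 32 bits on either endianness (a sketch; variable names are stand-ins):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t x0 = 0x00000000deadbeefULL;	/* a 64-bit pt_regs slot */
	uint32_t via_cast, via_value;

	/* old approach: copy the first 4 bytes of the slot, which are the
	 * high half on a big-endian host (reads back 0x00000000 there) */
	memcpy(&via_cast, &x0, sizeof(via_cast));

	/* new approach: value assignment, always the low 32 bits */
	via_value = (uint32_t)x0;

	printf("cast=0x%08x value=0x%08x\n", via_cast, via_value);
	return 0;
}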
......
@@ -205,6 +205,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

 void __init setup_arch(char **cmdline_p)
 {
+	/*
+	 * Unmask asynchronous aborts early to catch possible system errors.
+	 */
+	local_async_enable();
+
 	setup_processor();

 	setup_machine_fdt(__fdt_pointer);
......
......
@@ -160,6 +160,7 @@ asmlinkage void secondary_start_kernel(void)
 	local_irq_enable();
 	local_fiq_enable();
+	local_async_enable();

 	/*
 	 * OK, it's off to the idle thread for us
......