Commit 2d1eb87a authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM changes from Russell King:

 - Perf updates from Will Deacon:
   - Support for Qualcomm Krait processors (run perf on your phone!)
   - Support for Cortex-A12 (run perf stat on your FPGA!)
   - Support for perf_sample_event_took, allowing us to automatically decrease
     the sample rate if we can't handle the PMU interrupts quickly enough
     (run perf record on your FPGA!).

 - Basic uprobes support from David Long:
     This patch series adds basic uprobes support to ARM. It is based on
     patches developed earlier by Rabin Vincent; his approach of adding
     hooks into the kprobes instruction parsing code was not well received.
     This series instead separates the ARM instruction parsing code in
     kprobes out into a separate set of functions which can be used by both
     kprobes and uprobes. Both kprobes and uprobes then provide their own
     semantic action tables to process the results of the parsing (a
     simplified sketch of this split follows the list below).

 - ARMv7M (microcontroller) updates from Uwe Kleine-König

 - OMAP DMA updates (which recently gained Vinod's Ack, even though they
   had been sitting in linux-next for a few months) to reduce the reliance
   of omap-dma on the code in arch/arm.

 - SA11x0 changes from Dmitry Eremin-Solenikov and Alexander Shiyan

 - Support for Cortex-A12 CPU

 - Align support for ARMv6 with ARMv7 so they can cooperate better in a
   single zImage.

 - Addition of first AT_HWCAP2 feature bits for ARMv8 crypto support.

 - Removal of IRQF_DISABLED from various ARM files

 - Improved efficiency of virt_to_page() for single zImage

 - Patch from Ulf Hansson to permit runtime PM callbacks to be available for
   AMBA devices for suspend/resume as well.

 - Finally kill asm/system.h on ARM.
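
A minimal, self-contained sketch of the parsing/action-table split described
in the uprobes item above (userspace C; the struct and typedef names mirror
the kernel's asm/probes.h shown in the diff below, but the trivial decoder
and the two action tables are illustrative stand-ins, not the real kernel
API):

    /* One shared decoder; kprobes and uprobes each supply their own
     * semantic action table. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t probes_opcode_t;
    struct arch_probes_insn;
    typedef void (probes_insn_handler_t)(probes_opcode_t,
                                         struct arch_probes_insn *);

    struct arch_probes_insn {
            probes_insn_handler_t *insn_handler;   /* picked by the decoder */
    };

    static void kprobes_action(probes_opcode_t insn, struct arch_probes_insn *asi)
    {
            (void)asi; printf("kprobes: simulate %#x\n", (unsigned)insn);
    }

    static void uprobes_action(probes_opcode_t insn, struct arch_probes_insn *asi)
    {
            (void)asi; printf("uprobes: simulate %#x\n", (unsigned)insn);
    }

    /* The shared parser only selects an entry from the caller's table. */
    static void decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
                            probes_insn_handler_t *const actions[])
    {
            (void)insn;
            asi->insn_handler = actions[0];        /* trivial "decode" */
    }

    int main(void)
    {
            probes_insn_handler_t *const kacts[] = { kprobes_action };
            probes_insn_handler_t *const uacts[] = { uprobes_action };
            struct arch_probes_insn asi;
            probes_opcode_t insn = 0xe1a00000;     /* ARM "mov r0, r0" */

            decode_insn(insn, &asi, kacts);
            asi.insn_handler(insn, &asi);
            decode_insn(insn, &asi, uacts);
            asi.insn_handler(insn, &asi);
            return 0;
    }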

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (89 commits)
  dmaengine: omap-dma: more consolidation of CCR register setup
  dmaengine: omap-dma: move IRQ handling to omap-dma
  dmaengine: omap-dma: move register read/writes into omap-dma.c
  ARM: omap: dma: get rid of 'p' allocation and clean up
  ARM: omap: move dma channel allocation into plat-omap code
  ARM: omap: dma: get rid of errata global
  ARM: omap: clean up DMA register accesses
  ARM: omap: remove almost-const variables
  ARM: omap: remove references to disable_irq_lch
  dmaengine: omap-dma: cleanup errata 3.3 handling
  dmaengine: omap-dma: provide register read/write functions
  dmaengine: omap-dma: use cached CCR value when enabling DMA
  dmaengine: omap-dma: move barrier to omap_dma_start_desc()
  dmaengine: omap-dma: move clnk_ctrl setting to preparation functions
  dmaengine: omap-dma: improve efficiency loading C.SA/C.EI/C.FI registers
  dmaengine: omap-dma: consolidate clearing channel status register
  dmaengine: omap-dma: move CCR buffering disable errata out of the fast path
  dmaengine: omap-dma: provide register definitions
  dmaengine: omap-dma: consolidate setup of CCR
  dmaengine: omap-dma: consolidate setup of CSDP
  ...
......@@ -9,6 +9,7 @@ Required properties:
- compatible : should be one of
"arm,armv8-pmuv3"
"arm,cortex-a15-pmu"
"arm,cortex-a12-pmu"
"arm,cortex-a9-pmu"
"arm,cortex-a8-pmu"
"arm,cortex-a7-pmu"
......@@ -16,7 +17,14 @@ Required properties:
"arm,arm11mpcore-pmu"
"arm,arm1176-pmu"
"arm,arm1136-pmu"
- interrupts : 1 combined interrupt or 1 per core.
"qcom,krait-pmu"
- interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu
interrupt (PPI) then 1 interrupt should be specified.
Optional properties:
- qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
events.
Example:
......
......@@ -86,9 +86,7 @@ config KPROBES_ON_FTRACE
optimize on top of function tracing.
config UPROBES
bool "Transparent user-space probes (EXPERIMENTAL)"
depends on UPROBE_EVENT && PERF_EVENTS
default n
def_bool n
select PERCPU_RWSEM
help
Uprobes is the user-space counterpart to kprobes: they
......@@ -101,8 +99,6 @@ config UPROBES
managed by the kernel and kept transparent to the probed
application. )
If in doubt, say "N".
config HAVE_64BIT_ALIGNED_ACCESS
def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
help
......
......@@ -207,6 +207,9 @@ config ZONE_DMA
config NEED_DMA_MAP_STATE
def_bool y
config ARCH_SUPPORTS_UPROBES
def_bool y
config ARCH_HAS_DMA_SET_COHERENT_MASK
bool
......@@ -2271,7 +2274,7 @@ source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
depends on !ARCH_S5PC100
depends on CPU_ARM920T || CPU_ARM926T || CPU_FEROCEON || CPU_SA1100 || \
CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
def_bool y
config ARM_CPU_SUSPEND
......
......@@ -13,6 +13,7 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
CFLAGS_REMOVE_mcpm_entry.o = -pg
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
......
......@@ -232,8 +232,6 @@ static int scoop_probe(struct platform_device *pdev)
return 0;
if (devptr->gpio.base != -1)
temp = gpiochip_remove(&devptr->gpio);
err_gpio:
platform_set_drvdata(pdev, NULL);
err_ioremap:
......
......@@ -30,8 +30,8 @@
* Endian independent macros for shifting bytes within registers.
*/
#ifndef __ARMEB__
#define pull lsr
#define push lsl
#define lspull lsr
#define lspush lsl
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
......@@ -41,8 +41,8 @@
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
#define pull lsl
#define push lsr
#define lspull lsl
#define lspush lsr
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
......
......@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%3]\n"
......@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%3]\n"
......@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long res;
smp_mb();
prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
......@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
if (oldval != u)
smp_mb();
return oldval;
}
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
......@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
......@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
......@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
unsigned long tmp;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
......@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
unsigned long tmp;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
......@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
unsigned long res;
smp_mb();
prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic64_cmpxchg\n"
......@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
unsigned long tmp;
smp_mb();
prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
......@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
......@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
int ret = 1;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
......
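
The recurring change in the atomic.h hunks above is a prefetchw(&v->counter)
before each ldrex/strex retry loop: pulling the cache line in for writing up
front makes the first exclusive store less likely to fail. A portable sketch
of the same pattern, with GCC/Clang's __builtin_prefetch standing in for
prefetchw() and a C11 CAS loop standing in for ldrex/strex (assumptions, not
the kernel code itself):

    #include <stdatomic.h>

    static int atomic_add_return_sketch(int i, _Atomic int *v)
    {
            int old, new;

            __builtin_prefetch(v, 1);       /* rw=1: prefetch for write */
            do {
                    old = atomic_load_explicit(v, memory_order_relaxed);
                    new = old + i;          /* work between load and store */
            } while (!atomic_compare_exchange_weak(v, &old, new));
            return new;
    }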
......@@ -2,6 +2,7 @@
#define __ASM_ARM_CMPXCHG_H
#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
......@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#endif
smp_mb();
prefetchw((const void *)ptr);
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
......@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
{
unsigned long oldval, res;
prefetchw((const void *)ptr);
switch (size) {
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
......@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
unsigned long long oldval;
unsigned long res;
prefetchw(ptr);
__asm__ __volatile__(
"1: ldrexd %1, %H1, [%3]\n"
" teq %1, %4\n"
......
......@@ -71,6 +71,7 @@
#define ARM_CPU_PART_CORTEX_A5 0xC050
#define ARM_CPU_PART_CORTEX_A15 0xC0F0
#define ARM_CPU_PART_CORTEX_A7 0xC070
#define ARM_CPU_PART_CORTEX_A12 0xC0D0
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
......
......@@ -25,7 +25,7 @@
#define fd_inb(port) inb((port))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
IRQF_DISABLED,"floppy",NULL)
0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
......
......@@ -3,11 +3,6 @@
#ifdef __KERNEL__
#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
/* ARM doesn't provide unprivileged exclusive memory accessors */
#include <asm-generic/futex.h>
#else
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
......@@ -28,6 +23,7 @@
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
prefetchw(uaddr); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
......@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
......@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
......@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
#define ARM_DEBUG_ARCH_V8 6
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
......
......@@ -9,6 +9,7 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
extern unsigned int elf_hwcap;
#define ELF_HWCAP2 (elf_hwcap2)
extern unsigned int elf_hwcap, elf_hwcap2;
#endif
#endif
......@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/system.h>
#define JUMP_LABEL_NOP_SIZE 4
......
......@@ -18,7 +18,7 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
......@@ -28,21 +28,10 @@
#define kretprobe_blacklist_size 0
typedef u32 kprobe_opcode_t;
struct kprobe;
typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
typedef unsigned long (kprobe_check_cc)(unsigned long);
typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
typedef void (kprobe_insn_fn_t)(void);
#include <asm/probes.h>
/* Architecture specific copy of original instruction. */
struct arch_specific_insn {
kprobe_opcode_t *insn;
kprobe_insn_handler_t *insn_handler;
kprobe_check_cc *insn_check_cc;
kprobe_insn_singlestep_t *insn_singlestep;
kprobe_insn_fn_t *insn_fn;
};
#define arch_specific_insn arch_probes_insn
struct prev_kprobe {
struct kprobe *kp;
......
......@@ -166,9 +166,17 @@
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*/
#ifndef __virt_to_phys
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
#if defined(__virt_to_phys)
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
......@@ -177,12 +185,17 @@
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_7_0 0x81
extern u64 __pv_phys_offset;
extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;
#define PHYS_OFFSET __pv_phys_offset
#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
#define virt_to_pfn(kaddr) \
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
PHYS_PFN_OFFSET)
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
......@@ -243,6 +256,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#else
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
......@@ -254,18 +268,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
return x - PHYS_OFFSET + PAGE_OFFSET;
}
#endif
#endif
#define virt_to_pfn(kaddr) \
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
PHYS_PFN_OFFSET)
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
* This is the PFN of the first RAM page in the kernel
* direct-mapped view. We assume this is the first page
* of RAM in the mem_map as well.
*/
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
#endif
/*
* These are *only* valid on the kernel direct mapped RAM memory.
......@@ -343,9 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
*/
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
&& pfn_valid(virt_to_pfn(kaddr)))
#endif
......
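
A worked example of the new PFN-based conversions in the memory.h hunks
above, using typical (assumed) values PAGE_SHIFT = 12, PAGE_OFFSET =
0xc0000000 and PHYS_OFFSET = 0x80000000, so PHYS_PFN_OFFSET = 0x80000:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_OFFSET     0xc0000000UL
    #define PHYS_PFN_OFFSET (0x80000000UL >> PAGE_SHIFT)    /* 0x80000 */

    #define virt_to_pfn(kaddr) \
            ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
             PHYS_PFN_OFFSET)

    int main(void)
    {
            /* (0xc0123456 - 0xc0000000) >> 12 = 0x123; +0x80000 = 0x80123 */
            printf("pfn = %#lx\n", virt_to_pfn(0xc0123456UL));
            return 0;
    }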
......@@ -140,6 +140,7 @@
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
......
......@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID)
#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte) (0)
#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
#define pte_valid_user(pte) \
(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
......@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
unsigned long ext = 0;
if (addr < TASK_SIZE && pte_present_user(pteval)) {
if (addr < TASK_SIZE && pte_valid_user(pteval)) {
__sync_icache_dcache(pteval);
ext |= PTE_EXT_NG;
}
......
......@@ -71,6 +71,8 @@ struct arm_pmu {
void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
struct perf_event *event);
void (*clear_event_idx)(struct pmu_hw_events *hw_events,
struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
u32 (*read_counter)(struct perf_event *event);
......
/*
* arch/arm/include/asm/probes.h
*
* Original contents copied from arch/arm/include/asm/kprobes.h
* which contains the following notice...
*
* Copyright (C) 2006, 2007 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _ASM_PROBES_H
#define _ASM_PROBES_H
typedef u32 probes_opcode_t;
struct arch_probes_insn;
typedef void (probes_insn_handler_t)(probes_opcode_t,
struct arch_probes_insn *,
struct pt_regs *);
typedef unsigned long (probes_check_cc)(unsigned long);
typedef void (probes_insn_singlestep_t)(probes_opcode_t,
struct arch_probes_insn *,
struct pt_regs *);
typedef void (probes_insn_fn_t)(void);
/* Architecture specific copy of original instruction. */
struct arch_probes_insn {
probes_opcode_t *insn;
probes_insn_handler_t *insn_handler;
probes_check_cc *insn_check_cc;
probes_insn_singlestep_t *insn_singlestep;
probes_insn_fn_t *insn_fn;
};
#endif
......@@ -27,9 +27,13 @@ struct pt_regs {
#define thumb_mode(regs) (0)
#endif
#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
(((regs)->ARM_cpsr & PSR_T_BIT) >> 5))
((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
(((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
......@@ -80,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) (regs)->ARM_pc
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
instruction_pointer(regs) = val;
}
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
......
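
A quick check of the generalized isa_mode() arithmetic in the ptrace.h hunk
above, assuming the usual bit positions PSR_J_BIT = 1 << 24 and PSR_T_BIT =
1 << 5. __ffs() returns the index of the lowest set bit, so J lands in bit 1
and T in bit 0, giving 0 = ARM, 1 = Thumb, 2 = Jazelle, 3 = ThumbEE:

    #include <stdio.h>

    #define PSR_T_BIT (1UL << 5)
    #define PSR_J_BIT (1UL << 24)
    #define __ffs(x)  ((unsigned long)__builtin_ctzl(x))

    static unsigned long isa_mode(unsigned long cpsr)
    {
            return ((cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) |
                   ((cpsr & PSR_T_BIT) >> __ffs(PSR_T_BIT));
    }

    int main(void)
    {
            printf("%lu %lu %lu\n", isa_mode(0),             /* 0: ARM     */
                   isa_mode(PSR_T_BIT),                      /* 1: Thumb   */
                   isa_mode(PSR_J_BIT | PSR_T_BIT));         /* 3: ThumbEE */
            return 0;
    }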
......@@ -2,7 +2,6 @@
#define __ASM_SYNC_BITOPS_H__
#include <asm/bitops.h>
#include <asm/system.h>
/* sync_bitops functions are equivalent to the SMP implementation of the
* original functions, independently from CONFIG_SMP being defined.
......
/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/switch_to.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
......@@ -153,6 +153,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
......@@ -165,6 +166,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
......@@ -178,7 +180,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
......@@ -19,7 +19,7 @@
#include <asm/unified.h>
#include <asm/compiler.h>
#if __LINUX_ARM_ARCH__ < 6
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
......
......@@ -48,6 +48,5 @@
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
#define __IGNORE_kcmp
#endif /* __ASM_ARM_UNISTD_H */
/*
* Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_UPROBES_H
#define _ASM_UPROBES_H
#include <asm/probes.h>
#include <asm/opcodes.h>
typedef u32 uprobe_opcode_t;
#define MAX_UINSN_BYTES 4
#define UPROBE_XOL_SLOT_BYTES 64
#define UPROBE_SWBP_ARM_INSN 0xe7f001f9
#define UPROBE_SS_ARM_INSN 0xe7f001fa
#define UPROBE_SWBP_INSN __opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
#define UPROBE_SWBP_INSN_SIZE 4
struct arch_uprobe_task {
u32 backup;
unsigned long saved_trap_no;
};
struct arch_uprobe {
u8 insn[MAX_UINSN_BYTES];
unsigned long ixol[2];
uprobe_opcode_t bpinsn;
bool simulate;
u32 pcreg;
void (*prehandler)(struct arch_uprobe *auprobe,
struct arch_uprobe_task *autask,
struct pt_regs *regs);
void (*posthandler)(struct arch_uprobe *auprobe,
struct arch_uprobe_task *autask,
struct pt_regs *regs);
struct arch_probes_insn asi;
};
#endif
......@@ -28,4 +28,13 @@
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM (1 << 21)
/*
* HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
*/
#define HWCAP2_AES (1 << 0)
#define HWCAP2_PMULL (1 << 1)
#define HWCAP2_SHA1 (1 << 2)
#define HWCAP2_SHA2 (1 << 3)
#define HWCAP2_CRC32 (1 << 4)
#endif /* _UAPI__ASMARM_HWCAP_H */
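
A userspace sketch of probing the new ARMv8 crypto bits via glibc's
getauxval(); the HWCAP2_* values repeat the header above, and AT_HWCAP2 (26)
is defined here as a fallback for older libc headers:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_HWCAP2
    #define AT_HWCAP2 26            /* from linux/auxvec.h */
    #endif

    #define HWCAP2_AES  (1 << 0)
    #define HWCAP2_SHA1 (1 << 2)

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            printf("AES:  %s\n", (hwcap2 & HWCAP2_AES)  ? "yes" : "no");
            printf("SHA1: %s\n", (hwcap2 & HWCAP2_SHA1) ? "yes" : "no");
            return 0;
    }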
......@@ -50,11 +50,12 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o patch.o
obj-$(CONFIG_UPROBES) += probes.o probes-arm.o uprobes.o uprobes-arm.o
obj-$(CONFIG_KPROBES) += probes.o kprobes.o kprobes-common.o patch.o
ifdef CONFIG_THUMB2_KERNEL
obj-$(CONFIG_KPROBES) += kprobes-thumb.o
obj-$(CONFIG_KPROBES) += kprobes-thumb.o probes-thumb.o
else
obj-$(CONFIG_KPROBES) += kprobes-arm.o
obj-$(CONFIG_KPROBES) += kprobes-arm.o probes-arm.o
endif
obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
test-kprobes-objs := kprobes-test.o
......
......@@ -158,6 +158,6 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
EXPORT_SYMBOL(__pv_phys_pfn_offset);
EXPORT_SYMBOL(__pv_offset);
#endif
......@@ -605,41 +605,10 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
*/
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for (idx = 0; idx < 6; idx++) {
/* Only set up the requested stuff */
if (!(mask & (1 << idx)))
continue;
r = dev->resource + idx;
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because"
" of resource collisions\n", pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (pci_has_flag(PCI_PROBE_ONLY))
return 0;
/*
* Bridges (eg, cardbus bridges) need to be fully enabled
*/
if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
printk("PCI: enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
return pci_enable_resources(dev, mask);
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
......
......@@ -584,9 +584,10 @@ __fixup_pv_table:
subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
add r6, r6, r3 @ adjust __pv_phys_offset address
add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
add r7, r7, r3 @ adjust __pv_offset address
str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
mov r0, r8, lsr #12 @ convert to PFN
str r0, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
......@@ -600,7 +601,7 @@ ENDPROC(__fixup_pv_table)
1: .long .
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_offset
2: .long __pv_phys_pfn_offset
.long __pv_offset
.text
......@@ -688,11 +689,11 @@ ENTRY(fixup_pv_table)
ENDPROC(fixup_pv_table)
.data
.globl __pv_phys_offset
.type __pv_phys_offset, %object
__pv_phys_offset:
.quad 0
.size __pv_phys_offset, . -__pv_phys_offset
.globl __pv_phys_pfn_offset
.type __pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
.word 0
.size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset
.globl __pv_offset
.type __pv_offset, %object
......
......@@ -167,7 +167,7 @@ static int debug_arch_supported(void)
/* Can we determine the watchpoint access type from the fsr? */
static int debug_exception_updates_fsr(void)
{
return 0;
return get_debug_arch() >= ARM_DEBUG_ARCH_V8;
}
/* Determine number of WRP registers available. */
......@@ -257,6 +257,7 @@ static int enable_monitor_mode(void)
break;
case ARM_DEBUG_ARCH_V7_ECP14:
case ARM_DEBUG_ARCH_V7_1:
case ARM_DEBUG_ARCH_V8:
ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
isb();
break;
......
......@@ -13,178 +13,14 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <asm/system_info.h>
#include "kprobes.h"
#ifndef find_str_pc_offset
/*
* For STR and STM instructions, an ARM core may choose to use either
* a +8 or a +12 displacement from the current instruction's address.
* Whichever value is chosen for a given core, it must be the same for
* both instructions and may not change. This function measures it.
*/
int str_pc_offset;
void __init find_str_pc_offset(void)
{
int addr, scratch, ret;
__asm__ (
"sub %[ret], pc, #4 \n\t"
"str pc, %[addr] \n\t"
"ldr %[scr], %[addr] \n\t"
"sub %[ret], %[scr], %[ret] \n\t"
: [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));
str_pc_offset = ret;
}
#endif /* !find_str_pc_offset */
#ifndef test_load_write_pc_interworking
bool load_write_pc_interworks;
void __init test_load_write_pc_interworking(void)
{
int arch = cpu_architecture();
BUG_ON(arch == CPU_ARCH_UNKNOWN);
load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
}
#endif /* !test_load_write_pc_interworking */
#ifndef test_alu_write_pc_interworking
bool alu_write_pc_interworks;
void __init test_alu_write_pc_interworking(void)
{
int arch = cpu_architecture();
BUG_ON(arch == CPU_ARCH_UNKNOWN);
alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
}
#endif /* !test_alu_write_pc_interworking */
void __init arm_kprobe_decode_init(void)
{
find_str_pc_offset();
test_load_write_pc_interworking();
test_alu_write_pc_interworking();
}
static unsigned long __kprobes __check_eq(unsigned long cpsr)
{
return cpsr & PSR_Z_BIT;
}
static unsigned long __kprobes __check_ne(unsigned long cpsr)
{
return (~cpsr) & PSR_Z_BIT;
}
static unsigned long __kprobes __check_cs(unsigned long cpsr)
{
return cpsr & PSR_C_BIT;
}
static unsigned long __kprobes __check_cc(unsigned long cpsr)
{
return (~cpsr) & PSR_C_BIT;
}
static unsigned long __kprobes __check_mi(unsigned long cpsr)
{
return cpsr & PSR_N_BIT;
}
static unsigned long __kprobes __check_pl(unsigned long cpsr)
{
return (~cpsr) & PSR_N_BIT;
}
static unsigned long __kprobes __check_vs(unsigned long cpsr)
{
return cpsr & PSR_V_BIT;
}
static unsigned long __kprobes __check_vc(unsigned long cpsr)
{
return (~cpsr) & PSR_V_BIT;
}
static unsigned long __kprobes __check_hi(unsigned long cpsr)
{
cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
return cpsr & PSR_C_BIT;
}
static unsigned long __kprobes __check_ls(unsigned long cpsr)
{
cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
return (~cpsr) & PSR_C_BIT;
}
static unsigned long __kprobes __check_ge(unsigned long cpsr)
{
cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
return (~cpsr) & PSR_N_BIT;
}
static unsigned long __kprobes __check_lt(unsigned long cpsr)
{
cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
return cpsr & PSR_N_BIT;
}
static unsigned long __kprobes __check_gt(unsigned long cpsr)
{
unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
return (~temp) & PSR_N_BIT;
}
static unsigned long __kprobes __check_le(unsigned long cpsr)
{
unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
return temp & PSR_N_BIT;
}
static unsigned long __kprobes __check_al(unsigned long cpsr)
{
return true;
}
kprobe_check_cc * const kprobe_condition_checks[16] = {
&__check_eq, &__check_ne, &__check_cs, &__check_cc,
&__check_mi, &__check_pl, &__check_vs, &__check_vc,
&__check_hi, &__check_ls, &__check_ge, &__check_lt,
&__check_gt, &__check_le, &__check_al, &__check_al
};
void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs)
{
}
void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs)
{
p->ainsn.insn_fn();
}
static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
static void __kprobes simulate_ldm1stm1(probes_opcode_t insn,
struct arch_probes_insn *asi,
struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
int rn = (insn >> 16) & 0xf;
int lbit = insn & (1 << 20);
int wbit = insn & (1 << 21);
......@@ -223,24 +59,31 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
}
}
static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
static void __kprobes simulate_stm1_pc(probes_opcode_t insn,
struct arch_probes_insn *asi,
struct pt_regs *regs)
{
regs->ARM_pc = (long)p->addr + str_pc_offset;
simulate_ldm1stm1(p, regs);
regs->ARM_pc = (long)p->addr + 4;
unsigned long addr = regs->ARM_pc - 4;
regs->ARM_pc = (long)addr + str_pc_offset;
simulate_ldm1stm1(insn, asi, regs);
regs->ARM_pc = (long)addr + 4;
}
static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs)
static void __kprobes simulate_ldm1_pc(probes_opcode_t insn,
struct arch_probes_insn *asi,
struct pt_regs *regs)
{
simulate_ldm1stm1(p, regs);
simulate_ldm1stm1(insn, asi, regs);
load_write_pc(regs->ARM_pc, regs);
}
static void __kprobes
emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs)
emulate_generic_r0_12_noflags(probes_opcode_t insn,
struct arch_probes_insn *asi, struct pt_regs *regs)
{
register void *rregs asm("r1") = regs;
register void *rfn asm("lr") = p->ainsn.insn_fn;
register void *rfn asm("lr") = asi->insn_fn;
__asm__ __volatile__ (
"stmdb sp!, {%[regs], r11} \n\t"
......@@ -264,22 +107,27 @@ emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs)
}
static void __kprobes
emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs)
emulate_generic_r2_14_noflags(probes_opcode_t insn,
struct arch_probes_insn *asi, struct pt_regs *regs)
{
emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2));
emulate_generic_r0_12_noflags(insn, asi,
(struct pt_regs *)(regs->uregs+2));
}
static void __kprobes
emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs)
emulate_ldm_r3_15(probes_opcode_t insn,
struct arch_probes_insn *asi, struct pt_regs *regs)
{
emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3));
emulate_generic_r0_12_noflags(insn, asi,
(struct pt_regs *)(regs->uregs+3));
load_write_pc(regs->ARM_pc, regs);
}
enum kprobe_insn __kprobes
kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
enum probes_insn __kprobes
kprobe_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
const struct decode_header *h)
{
kprobe_insn_handler_t *handler = 0;
probes_insn_handler_t *handler = 0;
unsigned reglist = insn & 0xffff;
int is_ldm = insn & 0x100000;
int rn = (insn >> 16) & 0xf;
......@@ -319,260 +167,3 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return INSN_GOOD_NO_SLOT;
}
/*
* Prepare an instruction slot to receive an instruction for emulating.
* This is done by placing a subroutine return after the location where the
* instruction will be placed. We also modify ARM instructions to be
* unconditional as the condition code will already be checked before any
* emulation handler is called.
*/
static kprobe_opcode_t __kprobes
prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
if (thumb) {
u16 *thumb_insn = (u16 *)asi->insn;
thumb_insn[1] = 0x4770; /* Thumb bx lr */
thumb_insn[2] = 0x4770; /* Thumb bx lr */
return insn;
}
asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
#else
asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
#endif
/* Make an ARM instruction unconditional */
if (insn < 0xe0000000)
insn = (insn | 0xe0000000) & ~0x10000000;
return insn;
}
/*
* Write a (probably modified) instruction into the slot previously prepared by
* prepare_emulated_insn
*/
static void __kprobes
set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
if (thumb) {
u16 *ip = (u16 *)asi->insn;
if (is_wide_instruction(insn))
*ip++ = insn >> 16;
*ip++ = insn;
return;
}
#endif
asi->insn[0] = insn;
}
/*
* When we modify the register numbers encoded in an instruction to be emulated,
* the new values come from this define. For ARM and 32-bit Thumb instructions
* this gives...
*
* bit position 16 12 8 4 0
* ---------------+---+---+---+---+---+
* register r2 r0 r1 -- r3
*/
#define INSN_NEW_BITS 0x00020103
/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */
#define INSN_SAMEAS16_BITS 0x22222222
/*
* Validate and modify each of the registers encoded in an instruction.
*
* Each nibble in regs contains a value from enum decode_reg_type. For each
* non-zero value, the corresponding nibble in pinsn is validated and modified
* according to the type.
*/
static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs)
{
kprobe_opcode_t insn = *pinsn;
kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */
for (; regs != 0; regs >>= 4, mask <<= 4) {
kprobe_opcode_t new_bits = INSN_NEW_BITS;
switch (regs & 0xf) {
case REG_TYPE_NONE:
/* Nibble not a register, skip to next */
continue;
case REG_TYPE_ANY:
/* Any register is allowed */
break;
case REG_TYPE_SAMEAS16:
/* Replace register with same as at bit position 16 */
new_bits = INSN_SAMEAS16_BITS;
break;
case REG_TYPE_SP:
/* Only allow SP (R13) */
if ((insn ^ 0xdddddddd) & mask)
goto reject;
break;
case REG_TYPE_PC:
/* Only allow PC (R15) */
if ((insn ^ 0xffffffff) & mask)
goto reject;
break;
case REG_TYPE_NOSP:
/* Reject SP (R13) */
if (((insn ^ 0xdddddddd) & mask) == 0)
goto reject;
break;
case REG_TYPE_NOSPPC:
case REG_TYPE_NOSPPCX:
/* Reject SP and PC (R13 and R15) */
if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0)
goto reject;
break;
case REG_TYPE_NOPCWB:
if (!is_writeback(insn))
break; /* No writeback, so any register is OK */
/* fall through... */
case REG_TYPE_NOPC:
case REG_TYPE_NOPCX:
/* Reject PC (R15) */
if (((insn ^ 0xffffffff) & mask) == 0)
goto reject;
break;
}
/* Replace value of nibble with new register number... */
insn &= ~mask;
insn |= new_bits & mask;
}
*pinsn = insn;
return true;
reject:
return false;
}
static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
[DECODE_TYPE_TABLE] = sizeof(struct decode_table),
[DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom),
[DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate),
[DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate),
[DECODE_TYPE_OR] = sizeof(struct decode_or),
[DECODE_TYPE_REJECT] = sizeof(struct decode_reject)
};
/*
* kprobe_decode_insn operates on data tables in order to decode an ARM
* architecture instruction onto which a kprobe has been placed.
*
* These instruction decoding tables are a concatenation of entries each
* of which consist of one of the following structs:
*
* decode_table
* decode_custom
* decode_simulate
* decode_emulate
* decode_or
* decode_reject
*
* Each of these starts with a struct decode_header which has the following
* fields:
*
* type_regs
* mask
* value
*
* The least significant DECODE_TYPE_BITS of type_regs contains a value
* from enum decode_type, this indicates which of the decode_* structs
* the entry contains. The value DECODE_TYPE_END indicates the end of the
* table.
*
* When the table is parsed, each entry is checked in turn to see if it
* matches the instruction to be decoded using the test:
*
* (insn & mask) == value
*
* If no match is found before the end of the table is reached then decoding
* fails with INSN_REJECTED.
*
* When a match is found, decode_regs() is called to validate and modify each
* of the registers encoded in the instruction; the data it uses to do this
* is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding
* to fail with INSN_REJECTED.
*
* Once the instruction has passed the above tests, further processing
* depends on the type of the table entry's decode struct.
*
*/
int __kprobes
kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
const union decode_item *table, bool thumb)
{
const struct decode_header *h = (struct decode_header *)table;
const struct decode_header *next;
bool matched = false;
insn = prepare_emulated_insn(insn, asi, thumb);
for (;; h = next) {
enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;
if (type == DECODE_TYPE_END)
return INSN_REJECTED;
next = (struct decode_header *)
((uintptr_t)h + decode_struct_sizes[type]);
if (!matched && (insn & h->mask.bits) != h->value.bits)
continue;
if (!decode_regs(&insn, regs))
return INSN_REJECTED;
switch (type) {
case DECODE_TYPE_TABLE: {
struct decode_table *d = (struct decode_table *)h;
next = (struct decode_header *)d->table.table;
break;
}
case DECODE_TYPE_CUSTOM: {
struct decode_custom *d = (struct decode_custom *)h;
return (*d->decoder.decoder)(insn, asi);
}
case DECODE_TYPE_SIMULATE: {
struct decode_simulate *d = (struct decode_simulate *)h;
asi->insn_handler = d->handler.handler;
return INSN_GOOD_NO_SLOT;
}
case DECODE_TYPE_EMULATE: {
struct decode_emulate *d = (struct decode_emulate *)h;
asi->insn_handler = d->handler.handler;
set_emulated_insn(insn, asi, thumb);
return INSN_GOOD;
}
case DECODE_TYPE_OR:
matched = true;
break;
case DECODE_TYPE_REJECT:
default:
return INSN_REJECTED;
}
}
}
......@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/system_info.h>
#include "kprobes-test.h"
......
......@@ -201,10 +201,14 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/bug.h>
#include <asm/opcodes.h>
#include "kprobes.h"
#include "probes-arm.h"
#include "probes-thumb.h"
#include "kprobes-test.h"
......@@ -1608,7 +1612,7 @@ static int __init run_all_tests(void)
goto out;
pr_info("ARM instruction simulation\n");
ret = run_test_cases(kprobe_arm_test_cases, kprobe_decode_arm_table);
ret = run_test_cases(kprobe_arm_test_cases, probes_decode_arm_table);
if (ret)
goto out;
......@@ -1631,13 +1635,13 @@ static int __init run_all_tests(void)
pr_info("16-bit Thumb instruction simulation\n");
ret = run_test_cases(kprobe_thumb16_test_cases,
kprobe_decode_thumb16_table);
probes_decode_thumb16_table);
if (ret)
goto out;
pr_info("32-bit Thumb instruction simulation\n");
ret = run_test_cases(kprobe_thumb32_test_cases,
kprobe_decode_thumb32_table);
probes_decode_thumb32_table);
if (ret)
goto out;
#endif
......
......@@ -27,8 +27,12 @@
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <linux/percpu.h>
#include <linux/bug.h>
#include "kprobes.h"
#include "probes-arm.h"
#include "probes-thumb.h"
#include "patch.h"
#define MIN_STACK_SIZE(addr) \
......@@ -54,6 +58,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
unsigned long addr = (unsigned long)p->addr;
bool thumb;
kprobe_decode_insn_t *decode_insn;
const union decode_action *actions;
int is;
if (in_exception_text(addr))
......@@ -66,21 +71,25 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (is_wide_instruction(insn)) {
insn <<= 16;
insn |= ((u16 *)addr)[1];
decode_insn = thumb32_kprobe_decode_insn;
} else
decode_insn = thumb16_kprobe_decode_insn;
decode_insn = thumb32_probes_decode_insn;
actions = kprobes_t32_actions;
} else {
decode_insn = thumb16_probes_decode_insn;
actions = kprobes_t16_actions;
}
#else /* !CONFIG_THUMB2_KERNEL */
thumb = false;
if (addr & 0x3)
return -EINVAL;
insn = *p->addr;
decode_insn = arm_kprobe_decode_insn;
decode_insn = arm_probes_decode_insn;
actions = kprobes_arm_actions;
#endif
p->opcode = insn;
p->ainsn.insn = tmp_insn;
switch ((*decode_insn)(insn, &p->ainsn)) {
switch ((*decode_insn)(insn, &p->ainsn, true, actions)) {
case INSN_REJECTED: /* not supported */
return -EINVAL;
......@@ -92,7 +101,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
p->ainsn.insn[is] = tmp_insn[is];
flush_insns(p->ainsn.insn,
sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
p->ainsn.insn_fn = (kprobe_insn_fn_t *)
p->ainsn.insn_fn = (probes_insn_fn_t *)
((uintptr_t)p->ainsn.insn | thumb);
break;
......@@ -197,7 +206,7 @@ singlestep_skip(struct kprobe *p, struct pt_regs *regs)
static inline void __kprobes
singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
p->ainsn.insn_singlestep(p, regs);
p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
}
/*
......@@ -607,7 +616,7 @@ static struct undef_hook kprobes_arm_break_hook = {
int __init arch_init_kprobes()
{
arm_kprobe_decode_init();
arm_probes_decode_init();
#ifdef CONFIG_THUMB2_KERNEL
register_undef_hook(&kprobes_thumb16_break_hook);
register_undef_hook(&kprobes_thumb32_break_hook);
......
......@@ -19,6 +19,8 @@
#ifndef _ARM_KERNEL_KPROBES_H
#define _ARM_KERNEL_KPROBES_H
#include "probes.h"
/*
* These undefined instructions must be unique and
* reserved solely for kprobes' use.
......@@ -27,402 +29,24 @@
#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18
#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018
enum probes_insn __kprobes
kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_probes_insn *asi,
const struct decode_header *h);
enum kprobe_insn {
INSN_REJECTED,
INSN_GOOD,
INSN_GOOD_NO_SLOT
};
typedef enum kprobe_insn (kprobe_decode_insn_t)(kprobe_opcode_t,
struct arch_specific_insn *);
typedef enum probes_insn (kprobe_decode_insn_t)(probes_opcode_t,
struct arch_probes_insn *,
bool,
const union decode_action *);
#ifdef CONFIG_THUMB2_KERNEL
enum kprobe_insn thumb16_kprobe_decode_insn(kprobe_opcode_t,
struct arch_specific_insn *);
enum kprobe_insn thumb32_kprobe_decode_insn(kprobe_opcode_t,
struct arch_specific_insn *);
extern const union decode_action kprobes_t32_actions[];
extern const union decode_action kprobes_t16_actions[];
#else /* !CONFIG_THUMB2_KERNEL */
enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
struct arch_specific_insn *);
#endif
void __init arm_kprobe_decode_init(void);
extern kprobe_check_cc * const kprobe_condition_checks[16];
#if __LINUX_ARM_ARCH__ >= 7
/* str_pc_offset is architecturally defined from ARMv7 onwards */
#define str_pc_offset 8
#define find_str_pc_offset()
#else /* __LINUX_ARM_ARCH__ < 7 */
/* We need a run-time check to determine str_pc_offset */
extern int str_pc_offset;
void __init find_str_pc_offset(void);
extern const union decode_action kprobes_arm_actions[];
#endif
/*
* Update ITSTATE after normal execution of an IT block instruction.
*
* The 8 IT state bits are split into two parts in CPSR:
* ITSTATE<1:0> are in CPSR<26:25>
* ITSTATE<7:2> are in CPSR<15:10>
*/
static inline unsigned long it_advance(unsigned long cpsr)
{
if ((cpsr & 0x06000400) == 0) {
/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
cpsr &= ~PSR_IT_MASK;
} else {
/* We need to shift left ITSTATE<4:0> */
const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
unsigned long it = cpsr & mask;
it <<= 1;
it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
it &= mask;
cpsr &= ~mask;
cpsr |= it;
}
return cpsr;
}
static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
{
long cpsr = regs->ARM_cpsr;
if (pcv & 0x1) {
cpsr |= PSR_T_BIT;
pcv &= ~0x1;
} else {
cpsr &= ~PSR_T_BIT;
pcv &= ~0x2; /* Avoid UNPREDICTABLE address alignment */
}
regs->ARM_cpsr = cpsr;
regs->ARM_pc = pcv;
}
#if __LINUX_ARM_ARCH__ >= 6
/* Kernels built for >= ARMv6 should never run on <= ARMv5 hardware, so... */
#define load_write_pc_interworks true
#define test_load_write_pc_interworking()
#else /* __LINUX_ARM_ARCH__ < 6 */
/* We need run-time testing to determine if load_write_pc() should interwork. */
extern bool load_write_pc_interworks;
void __init test_load_write_pc_interworking(void);
#endif
static inline void __kprobes load_write_pc(long pcv, struct pt_regs *regs)
{
if (load_write_pc_interworks)
bx_write_pc(pcv, regs);
else
regs->ARM_pc = pcv;
}
#if __LINUX_ARM_ARCH__ >= 7
#define alu_write_pc_interworks true
#define test_alu_write_pc_interworking()
#elif __LINUX_ARM_ARCH__ <= 5
/* Kernels built for <= ARMv5 should never run on >= ARMv6 hardware, so... */
#define alu_write_pc_interworks false
#define test_alu_write_pc_interworking()
#else /* __LINUX_ARM_ARCH__ == 6 */
/* We could be an ARMv6 binary on ARMv7 hardware so we need a run-time check. */
extern bool alu_write_pc_interworks;
void __init test_alu_write_pc_interworking(void);
#endif /* __LINUX_ARM_ARCH__ == 6 */
static inline void __kprobes alu_write_pc(long pcv, struct pt_regs *regs)
{
if (alu_write_pc_interworks)
bx_write_pc(pcv, regs);
else
regs->ARM_pc = pcv;
}
void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs);
void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs);
enum kprobe_insn __kprobes
kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi);
/*
* Test if load/store instructions writeback the address register.
* if P (bit 24) == 0 or W (bit 21) == 1
*/
#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
/*
* The following definitions and macros are used to build instruction
* decoding tables for use by kprobe_decode_insn.
*
* These tables are a concatenation of entries each of which consist of one of
* the decode_* structs. All of the fields in every type of decode structure
* are of the union type decode_item, therefore the entire decode table can be
* viewed as an array of these and declared like:
*
* static const union decode_item table_name[] = {};
*
* In order to construct each entry in the table, macros are used to
* initialise a number of sequential decode_item values in a layout which
* matches the relevant struct. E.g. DECODE_SIMULATE initialises a struct
* decode_simulate by initialising four decode_item objects like this...
*
* {.bits = _type},
* {.bits = _mask},
* {.bits = _value},
* {.handler = _handler},
*
* Initialising a specified member of the union means that the compiler
* will produce a warning if the argument is of an incorrect type.
*
* Below is a list of each of the macros used to initialise entries and a
* description of the action performed when that entry is matched to an
* instruction. A match is found when (instruction & mask) == value.
*
* DECODE_TABLE(mask, value, table)
* Instruction decoding jumps to parsing the new sub-table 'table'.
*
* DECODE_CUSTOM(mask, value, decoder)
* The custom function 'decoder' is called to perform the complete decoding
* of an instruction.
*
* DECODE_SIMULATE(mask, value, handler)
* Set the probes instruction handler to 'handler', this will be used
* to simulate the instruction when the probe is hit. Decoding returns
* with INSN_GOOD_NO_SLOT.
*
* DECODE_EMULATE(mask, value, handler)
* Set the probes instruction handler to 'handler', this will be used
* to emulate the instruction when the probe is hit. The modified
* instruction (see below) is placed in the probes instruction slot so it
* may be called by the emulation code. Decoding returns with INSN_GOOD.
*
* DECODE_REJECT(mask, value)
* Instruction decoding fails with INSN_REJECTED
*
* DECODE_OR(mask, value)
* This allows the mask/value test of multiple table entries to be
* logically ORed. Once an 'or' entry is matched the decoding action to
* be performed is that of the next entry which isn't an 'or'. E.g.
*
* DECODE_OR (mask1, value1)
* DECODE_OR (mask2, value2)
* DECODE_SIMULATE (mask3, value3, simulation_handler)
*
* This means that if any of the three mask/value pairs match the
* instruction being decoded, then 'simulation_handler' will be used
* for it.
*
* Both the SIMULATE and EMULATE macros have a second form which take an
* additional 'regs' argument.
*
* DECODE_SIMULATEX(mask, value, handler, regs)
* DECODE_EMULATEX (mask, value, handler, regs)
*
* These are used to specify what kind of CPU register is encoded in each of the
* least significant 5 nibbles of the instruction being decoded. The regs value
* is specified using the REGS macro, this takes any of the REG_TYPE_* values
* from enum decode_reg_type as arguments; only the '*' part of the name is
* given. E.g.
*
* REGS(0, ANY, NOPC, 0, ANY)
*
* This indicates an instruction is encoded like:
*
* bits 19..16 ignore
* bits 15..12 any register allowed here
* bits 11.. 8 any register except PC allowed here
* bits 7.. 4 ignore
* bits 3.. 0 any register allowed here
*
* This register specification is checked after a decode table entry is found to
* match an instruction (through the mask/value test). Any invalid register then
* found in the instruction will cause decoding to fail with INSN_REJECTED. In
* the above example this would happen if bits 11..8 of the instruction were
* 1111, indicating R15 or PC.
*
* As well as checking for legal combinations of registers, this data is also
* used to modify the registers encoded in the instructions so that the
* emulation routines can use it. (See decode_regs() and INSN_NEW_BITS.)
*
* Here is a real example which matches ARM instructions of the form
* "AND <Rd>,<Rn>,<Rm>,<shift> <Rs>"
*
* DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
* REGS(ANY, ANY, NOPC, 0, ANY)),
* ^ ^ ^ ^
* Rn Rd Rs Rm
*
* Decoding the instruction "AND R4, R5, R6, ASL R15" will be rejected because
* Rs == R15
*
* Decoding the instruction "AND R4, R5, R6, ASL R7" will be accepted and the
* instruction will be modified to "AND R0, R2, R3, ASL R1" and then placed into
* the kprobes instruction slot. This can then be called later by the handler
* function emulate_rd12rn16rm0rs8_rwflags in order to simulate the instruction.
*/
enum decode_type {
DECODE_TYPE_END,
DECODE_TYPE_TABLE,
DECODE_TYPE_CUSTOM,
DECODE_TYPE_SIMULATE,
DECODE_TYPE_EMULATE,
DECODE_TYPE_OR,
DECODE_TYPE_REJECT,
NUM_DECODE_TYPES /* Must be last enum */
};
#define DECODE_TYPE_BITS 4
#define DECODE_TYPE_MASK ((1 << DECODE_TYPE_BITS) - 1)
enum decode_reg_type {
REG_TYPE_NONE = 0, /* Not a register, ignore */
REG_TYPE_ANY, /* Any register allowed */
REG_TYPE_SAMEAS16, /* Register should be same as that at bits 19..16 */
REG_TYPE_SP, /* Register must be SP */
REG_TYPE_PC, /* Register must be PC */
REG_TYPE_NOSP, /* Register must not be SP */
REG_TYPE_NOSPPC, /* Register must not be SP or PC */
REG_TYPE_NOPC, /* Register must not be PC */
REG_TYPE_NOPCWB, /* No PC if load/store write-back flag also set */
/* The following types are used when the encoding for PC indicates
* another instruction form. This distinction only matters for test
* case coverage checks.
*/
REG_TYPE_NOPCX, /* Register must not be PC */
REG_TYPE_NOSPPCX, /* Register must not be SP or PC */
/* Alias to allow '0' arg to be used in REGS macro. */
REG_TYPE_0 = REG_TYPE_NONE
};
#define REGS(r16, r12, r8, r4, r0) \
((REG_TYPE_##r16) << 16) + \
((REG_TYPE_##r12) << 12) + \
((REG_TYPE_##r8) << 8) + \
((REG_TYPE_##r4) << 4) + \
(REG_TYPE_##r0)
union decode_item {
u32 bits;
const union decode_item *table;
kprobe_insn_handler_t *handler;
kprobe_decode_insn_t *decoder;
};
#define DECODE_END \
{.bits = DECODE_TYPE_END}
struct decode_header {
union decode_item type_regs;
union decode_item mask;
union decode_item value;
};
#define DECODE_HEADER(_type, _mask, _value, _regs) \
{.bits = (_type) | ((_regs) << DECODE_TYPE_BITS)}, \
{.bits = (_mask)}, \
{.bits = (_value)}
struct decode_table {
struct decode_header header;
union decode_item table;
};
#define DECODE_TABLE(_mask, _value, _table) \
DECODE_HEADER(DECODE_TYPE_TABLE, _mask, _value, 0), \
{.table = (_table)}
struct decode_custom {
struct decode_header header;
union decode_item decoder;
};
#define DECODE_CUSTOM(_mask, _value, _decoder) \
DECODE_HEADER(DECODE_TYPE_CUSTOM, _mask, _value, 0), \
{.decoder = (_decoder)}
struct decode_simulate {
struct decode_header header;
union decode_item handler;
};
#define DECODE_SIMULATEX(_mask, _value, _handler, _regs) \
DECODE_HEADER(DECODE_TYPE_SIMULATE, _mask, _value, _regs), \
{.handler = (_handler)}
#define DECODE_SIMULATE(_mask, _value, _handler) \
DECODE_SIMULATEX(_mask, _value, _handler, 0)
struct decode_emulate {
struct decode_header header;
union decode_item handler;
};
#define DECODE_EMULATEX(_mask, _value, _handler, _regs) \
DECODE_HEADER(DECODE_TYPE_EMULATE, _mask, _value, _regs), \
{.handler = (_handler)}
#define DECODE_EMULATE(_mask, _value, _handler) \
DECODE_EMULATEX(_mask, _value, _handler, 0)
struct decode_or {
struct decode_header header;
};
#define DECODE_OR(_mask, _value) \
DECODE_HEADER(DECODE_TYPE_OR, _mask, _value, 0)
struct decode_reject {
struct decode_header header;
};
#define DECODE_REJECT(_mask, _value) \
DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)
#ifdef CONFIG_THUMB2_KERNEL
extern const union decode_item kprobe_decode_thumb16_table[];
extern const union decode_item kprobe_decode_thumb32_table[];
#else
extern const union decode_item kprobe_decode_arm_table[];
#endif
int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
const union decode_item *table, bool thumb16);
#endif /* _ARM_KERNEL_KPROBES_H */
......@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
......@@ -205,6 +207,8 @@ armpmu_del(struct perf_event *event, int flags)
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
if (armpmu->clear_event_idx)
armpmu->clear_event_idx(hw_events, event);
perf_event_update_userpage(event);
}
......@@ -295,14 +299,27 @@ validate_group(struct perf_event *event)
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu = (struct arm_pmu *) dev;
struct platform_device *plat_device = armpmu->plat_device;
struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
struct arm_pmu *armpmu;
struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
if (irq_is_percpu(irq))
dev = *(void **)dev;
armpmu = dev;
plat_device = armpmu->plat_device;
plat = dev_get_platdata(&plat_device->dev);
start_clock = sched_clock();
if (plat && plat->handle_irq)
return plat->handle_irq(irq, dev, armpmu->handle_irq);
ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
else
return armpmu->handle_irq(irq, dev);
ret = armpmu->handle_irq(irq, dev);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
return ret;
}
static void
......
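
The dispatch change above brackets the PMU handler with sched_clock() reads
and feeds the delta to perf_sample_event_took(), which can lower the maximum
sample rate when interrupts take too long (the commit-message item about
perf record on FPGAs). A userspace analogue of that measure-and-report
pattern, with clock_gettime() standing in for sched_clock() and a stub for
the throttle:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    static void sample_event_took(uint64_t ns)  /* stub throttle */
    {
            printf("handler took %llu ns\n", (unsigned long long)ns);
    }

    static void handle_irq(void) { /* pretend PMU counter work */ }

    int main(void)
    {
            uint64_t start = now_ns();

            handle_irq();
            sample_event_took(now_ns() - start);
            return 0;
    }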
......@@ -25,6 +25,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
......@@ -33,6 +35,7 @@
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
......@@ -71,6 +74,26 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
return this_cpu_ptr(&cpu_hw_events);
}
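/*
 * PPIs can only be enabled/disabled on the CPU that receives them, so
 * the two helpers below are run on every CPU via on_each_cpu().
 */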
static void cpu_pmu_enable_percpu_irq(void *data)
{
struct arm_pmu *cpu_pmu = data;
struct platform_device *pmu_device = cpu_pmu->plat_device;
int irq = platform_get_irq(pmu_device, 0);
enable_percpu_irq(irq, IRQ_TYPE_NONE);
cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
}
static void cpu_pmu_disable_percpu_irq(void *data)
{
struct arm_pmu *cpu_pmu = data;
struct platform_device *pmu_device = cpu_pmu->plat_device;
int irq = platform_get_irq(pmu_device, 0);
cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
disable_percpu_irq(irq);
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
int i, irq, irqs;
......@@ -78,12 +101,18 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
irqs = min(pmu_device->num_resources, num_possible_cpus());
for (i = 0; i < irqs; ++i) {
if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
free_irq(irq, cpu_pmu);
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
free_percpu_irq(irq, &percpu_pmu);
} else {
for (i = 0; i < irqs; ++i) {
if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
free_irq(irq, cpu_pmu);
}
}
}
......@@ -101,33 +130,44 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return -ENODEV;
}
for (i = 0; i < irqs; ++i) {
err = 0;
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
/*
* If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, i);
continue;
}
err = request_irq(irq, handler,
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
cpu_pmu);
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
return err;
}
cpumask_set_cpu(i, &cpu_pmu->active_irqs);
on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
} else {
for (i = 0; i < irqs; ++i) {
err = 0;
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
/*
* If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, i);
continue;
}
err = request_irq(irq, handler,
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
cpu_pmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
return err;
}
cpumask_set_cpu(i, &cpu_pmu->active_irqs);
}
}
return 0;
......@@ -141,6 +181,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
events->events = per_cpu(hw_events, cpu);
events->used_mask = per_cpu(used_mask, cpu);
raw_spin_lock_init(&events->pmu_lock);
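/* let the per-CPU IRQ handler map its dev pointer back to this PMU */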
per_cpu(percpu_pmu, cpu) = cpu_pmu;
}
cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
......@@ -181,6 +222,7 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
*/
static struct of_device_id cpu_pmu_of_device_ids[] = {
{.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
{.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
{.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
{.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
{.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
......@@ -188,6 +230,7 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
{.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
{.compatible = "arm,arm1176-pmu", .data = armv6pmu_init},
{.compatible = "arm,arm1136-pmu", .data = armv6pmu_init},
{.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
{},
};
......@@ -225,15 +268,6 @@ static int probe_current_pmu(struct arm_pmu *pmu)
case ARM_CPU_PART_CORTEX_A9:
ret = armv7_a9_pmu_init(pmu);
break;
case ARM_CPU_PART_CORTEX_A5:
ret = armv7_a5_pmu_init(pmu);
break;
case ARM_CPU_PART_CORTEX_A15:
ret = armv7_a15_pmu_init(pmu);
break;
case ARM_CPU_PART_CORTEX_A7:
ret = armv7_a7_pmu_init(pmu);
break;
}
/* Intel CPUs [xscale]. */
} else if (implementor == ARM_CPU_IMP_INTEL) {
......@@ -270,6 +304,9 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
return -ENOMEM;
}
cpu_pmu = pmu;
cpu_pmu->plat_device = pdev;
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
init_fn = of_id->data;
ret = init_fn(pmu);
......@@ -282,8 +319,6 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
goto out_free;
}
cpu_pmu = pmu;
cpu_pmu->plat_device = pdev;
cpu_pmu_init(cpu_pmu);
ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
......
(2 file diffs collapsed)
/*
* arch/arm/kernel/probes-arm.h
*
* Copyright 2013 Linaro Ltd.
* Written by: David A. Long
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#ifndef _ARM_KERNEL_PROBES_ARM_H
#define _ARM_KERNEL_PROBES_ARM_H
enum probes_arm_action {
PROBES_EMULATE_NONE,
PROBES_SIMULATE_NOP,
PROBES_PRELOAD_IMM,
PROBES_PRELOAD_REG,
PROBES_BRANCH_IMM,
PROBES_BRANCH_REG,
PROBES_MRS,
PROBES_CLZ,
PROBES_SATURATING_ARITHMETIC,
PROBES_MUL1,
PROBES_MUL2,
PROBES_SWP,
PROBES_LDRSTRD,
PROBES_LOAD,
PROBES_STORE,
PROBES_LOAD_EXTRA,
PROBES_STORE_EXTRA,
PROBES_MOV_IP_SP,
PROBES_DATA_PROCESSING_REG,
PROBES_DATA_PROCESSING_IMM,
PROBES_MOV_HALFWORD,
PROBES_SEV,
PROBES_WFE,
PROBES_SATURATE,
PROBES_REV,
PROBES_MMI,
PROBES_PACK,
PROBES_EXTEND,
PROBES_EXTEND_ADD,
PROBES_MUL_ADD_LONG,
PROBES_MUL_ADD,
PROBES_BITFIELD,
PROBES_BRANCH,
PROBES_LDMSTM,
NUM_PROBES_ARM_ACTIONS
};
void __kprobes simulate_bbl(probes_opcode_t opcode,
struct arch_probes_insn *asi, struct pt_regs *regs);
void __kprobes simulate_blx1(probes_opcode_t opcode,
struct arch_probes_insn *asi, struct pt_regs *regs);
void __kprobes simulate_blx2bx(probes_opcode_t opcode,
struct arch_probes_insn *asi, struct pt_regs *regs);
void __kprobes simulate_mrs(probes_opcode_t opcode,
struct arch_probes_insn *asi, struct pt_regs *regs);
void __kprobes simulate_mov_ipsp(probes_opcode_t opcode,
struct arch_probes_insn *asi, struct pt_regs *regs);
extern const union decode_item probes_decode_arm_table[];
enum probes_insn arm_probes_decode_insn(probes_opcode_t,
struct arch_probes_insn *, bool emulate,
const union decode_action *actions);
#endif
(1 file diff collapsed)
/*
* arch/arm/kernel/probes-thumb.h
*
* Copyright 2013 Linaro Ltd.
* Written by: David A. Long
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#ifndef _ARM_KERNEL_PROBES_THUMB_H
#define _ARM_KERNEL_PROBES_THUMB_H
/*
* True if current instruction is in an IT block.
*/
#define in_it_block(cpsr) ((cpsr & 0x06000c00) != 0x00000000)
/*
* Return the condition code to check for the currently executing instruction.
* This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if
* in_it_block returns true.
*/
#define current_cond(cpsr) ((cpsr >> 12) & 0xf)
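A probe handler can combine these two macros to decide whether the probed instruction would actually execute. A minimal sketch, where check_condition() is a hypothetical helper that evaluates a condition code against the CPSR flags (not part of this diff):
static bool insn_would_execute(unsigned long cpsr)
{
	if (!in_it_block(cpsr))
		return true;	/* outside an IT block: unconditional */
	/* check_condition() is hypothetical: test cond against N/Z/C/V */
	return check_condition(current_cond(cpsr), cpsr);
}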
enum probes_t32_action {
PROBES_T32_EMULATE_NONE,
PROBES_T32_SIMULATE_NOP,
PROBES_T32_LDMSTM,
PROBES_T32_LDRDSTRD,
PROBES_T32_TABLE_BRANCH,
PROBES_T32_TST,
PROBES_T32_CMP,
PROBES_T32_MOV,
PROBES_T32_ADDSUB,
PROBES_T32_LOGICAL,
PROBES_T32_ADDWSUBW_PC,
PROBES_T32_ADDWSUBW,
PROBES_T32_MOVW,
PROBES_T32_SAT,
PROBES_T32_BITFIELD,
PROBES_T32_SEV,
PROBES_T32_WFE,
PROBES_T32_MRS,
PROBES_T32_BRANCH_COND,
PROBES_T32_BRANCH,
PROBES_T32_PLDI,
PROBES_T32_LDR_LIT,
PROBES_T32_LDRSTR,
PROBES_T32_SIGN_EXTEND,
PROBES_T32_MEDIA,
PROBES_T32_REVERSE,
PROBES_T32_MUL_ADD,
PROBES_T32_MUL_ADD2,
PROBES_T32_MUL_ADD_LONG,
NUM_PROBES_T32_ACTIONS
};
enum probes_t16_action {
PROBES_T16_ADD_SP,
PROBES_T16_CBZ,
PROBES_T16_SIGN_EXTEND,
PROBES_T16_PUSH,
PROBES_T16_POP,
PROBES_T16_SEV,
PROBES_T16_WFE,
PROBES_T16_IT,
PROBES_T16_CMP,
PROBES_T16_ADDSUB,
PROBES_T16_LOGICAL,
PROBES_T16_BLX,
PROBES_T16_HIREGOPS,
PROBES_T16_LDR_LIT,
PROBES_T16_LDRHSTRH,
PROBES_T16_LDRSTR,
PROBES_T16_ADR,
PROBES_T16_LDMSTM,
PROBES_T16_BRANCH_COND,
PROBES_T16_BRANCH,
NUM_PROBES_T16_ACTIONS
};
extern const union decode_item probes_decode_thumb32_table[];
extern const union decode_item probes_decode_thumb16_table[];
enum probes_insn __kprobes
thumb16_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
bool emulate, const union decode_action *actions);
enum probes_insn __kprobes
thumb32_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
bool emulate, const union decode_action *actions);
#endif
(2 file diffs collapsed)
......@@ -47,14 +47,14 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
static const char *processor_modes[] = {
static const char *processor_modes[] __maybe_unused = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
"USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
"UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};
static const char *isa_modes[] = {
static const char *isa_modes[] __maybe_unused = {
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
......@@ -270,12 +270,17 @@ void __show_regs(struct pt_regs *regs)
buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
buf[4] = '\0';
#ifndef CONFIG_CPU_V7M
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
buf, interrupts_enabled(regs) ? "n" : "ff",
fast_interrupts_enabled(regs) ? "n" : "ff",
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)],
get_fs() == get_ds() ? "kernel" : "user");
#else
printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif
#ifdef CONFIG_CPU_CP15
{
unsigned int ctrl;
......
......@@ -100,6 +100,9 @@ EXPORT_SYMBOL(system_serial_high);
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
......@@ -1005,6 +1008,15 @@ static const char *hwcap_str[] = {
NULL
};
static const char *hwcap2_str[] = {
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
NULL
};
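These strings mirror the HWCAP2_* bits that userspace sees via the new AT_HWCAP2 auxiliary vector entry. A minimal userspace sketch, assuming a libc that provides getauxval() and AT_HWCAP2 (the fallback bit definitions below simply follow the ordering of hwcap2_str[] above):
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_AES
#define HWCAP2_AES	(1 << 0)
#endif
#ifndef HWCAP2_CRC32
#define HWCAP2_CRC32	(1 << 4)
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("aes:   %s\n", (hwcap2 & HWCAP2_AES)   ? "yes" : "no");
	printf("crc32: %s\n", (hwcap2 & HWCAP2_CRC32) ? "yes" : "no");
	return 0;
}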
static int c_show(struct seq_file *m, void *v)
{
int i, j;
......@@ -1028,6 +1040,10 @@ static int c_show(struct seq_file *m, void *v)
if (elf_hwcap & (1 << j))
seq_printf(m, "%s ", hwcap_str[j]);
for (j = 0; hwcap2_str[j]; j++)
if (elf_hwcap2 & (1 << j))
seq_printf(m, "%s ", hwcap2_str[j]);
seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
seq_printf(m, "CPU architecture: %s\n",
proc_arch[cpu_architecture()]);
......
......@@ -13,6 +13,7 @@
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
......@@ -590,6 +591,9 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
return restart;
}
syscall = 0;
} else if (thread_flags & _TIF_UPROBE) {
clear_thread_flag(TIF_UPROBE);
uprobe_notify_resume(regs);
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
......
......@@ -68,6 +68,12 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
const unsigned long *insn; /* pointer to the current instructions word */
unsigned long sp_high; /* highest value of sp allowed */
/*
* 1 : check for stack overflow for each register pop.
* 0 : save overhead if there is plenty of stack remaining.
*/
int check_each_pop;
int entries; /* number of entries left to interpret */
int byte; /* current byte number in the instructions word */
};
......@@ -235,12 +241,85 @@ static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
return ret;
}
/* Before popping a register, check whether it is feasible or not */
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
unsigned long **vsp, unsigned int reg)
{
if (unlikely(ctrl->check_each_pop))
if (*vsp >= (unsigned long *)ctrl->sp_high)
return -URC_FAILURE;
ctrl->vrs[reg] = *(*vsp)++;
return URC_OK;
}
/* Helper functions to execute the instructions */
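/*
 * All three helpers funnel every pop through unwind_pop_register(), so
 * the sp_high bound is enforced on each word read off the stack.
 */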
static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
unsigned long mask)
{
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int load_sp, reg = 4;
load_sp = mask & (1 << (13 - 4));
while (mask) {
if (mask & 1)
if (unwind_pop_register(ctrl, &vsp, reg))
return -URC_FAILURE;
mask >>= 1;
reg++;
}
if (!load_sp)
ctrl->vrs[SP] = (unsigned long)vsp;
return URC_OK;
}
static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
unsigned long insn)
{
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg;
/* pop R4-R[4+bbb] */
for (reg = 4; reg <= 4 + (insn & 7); reg++)
if (unwind_pop_register(ctrl, &vsp, reg))
return -URC_FAILURE;
if (insn & 0x80)
if (unwind_pop_register(ctrl, &vsp, 14))
return -URC_FAILURE;
ctrl->vrs[SP] = (unsigned long)vsp;
return URC_OK;
}
static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
unsigned long mask)
{
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg = 0;
/* pop R0-R3 according to mask */
while (mask) {
if (mask & 1)
if (unwind_pop_register(ctrl, &vsp, reg))
return -URC_FAILURE;
mask >>= 1;
reg++;
}
ctrl->vrs[SP] = (unsigned long)vsp;
return URC_OK;
}
/*
* Execute the current unwind instruction.
*/
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
unsigned long insn = unwind_get_byte(ctrl);
int ret = URC_OK;
pr_debug("%s: insn = %08lx\n", __func__, insn);
......@@ -250,8 +329,6 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
else if ((insn & 0xf0) == 0x80) {
unsigned long mask;
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int load_sp, reg = 4;
insn = (insn << 8) | unwind_get_byte(ctrl);
mask = insn & 0x0fff;
......@@ -261,29 +338,16 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
return -URC_FAILURE;
}
/* pop R4-R15 according to mask */
load_sp = mask & (1 << (13 - 4));
while (mask) {
if (mask & 1)
ctrl->vrs[reg] = *vsp++;
mask >>= 1;
reg++;
}
if (!load_sp)
ctrl->vrs[SP] = (unsigned long)vsp;
ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
if (ret)
goto error;
} else if ((insn & 0xf0) == 0x90 &&
(insn & 0x0d) != 0x0d)
ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
else if ((insn & 0xf0) == 0xa0) {
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg;
/* pop R4-R[4+bbb] */
for (reg = 4; reg <= 4 + (insn & 7); reg++)
ctrl->vrs[reg] = *vsp++;
if (insn & 0x80)
ctrl->vrs[14] = *vsp++;
ctrl->vrs[SP] = (unsigned long)vsp;
ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
if (ret)
goto error;
} else if (insn == 0xb0) {
if (ctrl->vrs[PC] == 0)
ctrl->vrs[PC] = ctrl->vrs[LR];
......@@ -291,8 +355,6 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
ctrl->entries = 0;
} else if (insn == 0xb1) {
unsigned long mask = unwind_get_byte(ctrl);
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg = 0;
if (mask == 0 || mask & 0xf0) {
pr_warning("unwind: Spare encoding %04lx\n",
......@@ -300,14 +362,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
return -URC_FAILURE;
}
/* pop R0-R3 according to mask */
while (mask) {
if (mask & 1)
ctrl->vrs[reg] = *vsp++;
mask >>= 1;
reg++;
}
ctrl->vrs[SP] = (unsigned long)vsp;
ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
if (ret)
goto error;
} else if (insn == 0xb2) {
unsigned long uleb128 = unwind_get_byte(ctrl);
......@@ -320,7 +377,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);
return URC_OK;
error:
return ret;
}
/*
......@@ -329,13 +387,13 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
*/
int unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long low;
const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
/* only go to a higher address on the stack */
/* store the highest address on the stack to avoid crossing it */
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
ctrl.sp_high = ALIGN(low, THREAD_SIZE);
pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
frame->pc, frame->lr, frame->sp);
......@@ -382,11 +440,16 @@ int unwind_frame(struct stackframe *frame)
return -URC_FAILURE;
}
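/*
 * Checking every pop is only worthwhile once less than sizeof(ctrl.vrs)
 * bytes of stack remain, i.e. when a single unwind instruction could
 * plausibly cross sp_high; see the test inside the loop below.
 */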
ctrl.check_each_pop = 0;
while (ctrl.entries > 0) {
int urc = unwind_exec_insn(&ctrl);
int urc;
if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
ctrl.check_each_pop = 1;
urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high)
return -URC_FAILURE;
}
......
(2 file diffs collapsed)
/*
* Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARM_KERNEL_UPROBES_H
#define __ARM_KERNEL_UPROBES_H
enum probes_insn uprobe_decode_ldmstm(probes_opcode_t insn,
struct arch_probes_insn *asi,
const struct decode_header *d);
enum probes_insn decode_ldr(probes_opcode_t insn,
struct arch_probes_insn *asi,
const struct decode_header *d);
enum probes_insn
decode_rd12rn16rm0rs8_rwflags(probes_opcode_t insn,
struct arch_probes_insn *asi,
const struct decode_header *d);
enum probes_insn
decode_wb_pc(probes_opcode_t insn, struct arch_probes_insn *asi,
const struct decode_header *d, bool alu);
enum probes_insn
decode_pc_ro(probes_opcode_t insn, struct arch_probes_insn *asi,
const struct decode_header *d);
extern const union decode_action uprobes_probes_actions[];
#endif
......@@ -37,6 +37,11 @@ UNWIND( .fnstart )
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask
smp_dmb
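@ MP-extension PLDW preloads the line in a writeable state, so the
@ ldrex/strex loop below is less likely to lose it to another core
@ between the load-exclusive and the store-exclusive.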
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
.arch_extension mp
ALT_SMP(W(pldw) [r1])
ALT_UP(W(nop))
#endif
1: ldrex r2, [r1]
ands r0, r2, r3 @ save old value of bit
\instr r2, r2, r3 @ toggle bit
......
......@@ -197,24 +197,24 @@
12: PLD( pld [r1, #124] )
13: ldr4w r1, r4, r5, r6, r7, abort=19f
mov r3, lr, pull #\pull
mov r3, lr, lspull #\pull
subs r2, r2, #32
ldr4w r1, r8, r9, ip, lr, abort=19f
orr r3, r3, r4, push #\push
mov r4, r4, pull #\pull
orr r4, r4, r5, push #\push
mov r5, r5, pull #\pull
orr r5, r5, r6, push #\push
mov r6, r6, pull #\pull
orr r6, r6, r7, push #\push
mov r7, r7, pull #\pull
orr r7, r7, r8, push #\push
mov r8, r8, pull #\pull
orr r8, r8, r9, push #\push
mov r9, r9, pull #\pull
orr r9, r9, ip, push #\push
mov ip, ip, pull #\pull
orr ip, ip, lr, push #\push
orr r3, r3, r4, lspush #\push
mov r4, r4, lspull #\pull
orr r4, r4, r5, lspush #\push
mov r5, r5, lspull #\pull
orr r5, r5, r6, lspush #\push
mov r6, r6, lspull #\pull
orr r6, r6, r7, lspush #\push
mov r7, r7, lspull #\pull
orr r7, r7, r8, lspush #\push
mov r8, r8, lspull #\pull
orr r8, r8, r9, lspush #\push
mov r9, r9, lspull #\pull
orr r9, r9, ip, lspush #\push
mov ip, ip, lspull #\pull
orr ip, ip, lr, lspush #\push
str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
bge 12b
PLD( cmn r2, #96 )
......@@ -225,10 +225,10 @@
14: ands ip, r2, #28
beq 16f
15: mov r3, lr, pull #\pull
15: mov r3, lr, lspull #\pull
ldr1w r1, lr, abort=21f
subs ip, ip, #4
orr r3, r3, lr, push #\push
orr r3, r3, lr, lspush #\push
str1w r0, r3, abort=21f
bgt 15b
CALGN( cmp r2, #0 )
......
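Note that the pull/push shift-helper macros are renamed to lspull/lspush throughout this file; the generated code is unchanged, and the new names avoid colliding with the PUSH instruction mnemonic.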
(4 file diffs collapsed)
......@@ -155,7 +155,7 @@ static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id)
static struct irqaction cns3xxx_timer_irq = {
.name = "timer",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = cns3xxx_timer_interrupt,
};
......
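Dropping IRQF_DISABLED here is a pure cleanup: the flag has been a no-op for several releases, since interrupt handlers always run with interrupts disabled.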
(43 file diffs collapsed)