Commit 1ee07ef6 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "This patch set contains the main portion of the changes for 3.18 in
  regard to the s390 architecture.  It is a bit bigger than usual,
  mainly because of a new driver and the vector extension patches.

  The interesting bits are:
   - Quite a bit of work on the tracing front.  Uprobes is enabled and
     the ftrace code is reworked to get some of the lost performance
     back if CONFIG_FTRACE is enabled.
   - To improve boot time with CONFIG_DEBUG_PAGEALLOC, support for the
     IPTE range facility is added.
   - The rwlock code is re-factored to improve writer fairness and to be
     able to use the interlocked-access instructions.
   - The kernel part for the support of the vector extension is added.
   - The device driver to access the CD/DVD on the HMC is added, this
     will hopefully come in handy to improve the installation process.
   - Add support for control-unit initiated reconfiguration.
   - The crypto device driver is enhanced to enable the additional AP
     domains and to allow the new crypto hardware to be used.
   - Bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits)
  s390/ftrace: simplify enabling/disabling of ftrace_graph_caller
  s390/ftrace: remove 31 bit ftrace support
  s390/kdump: add support for vector extension
  s390/disassembler: add vector instructions
  s390: add support for vector extension
  s390/zcrypt: Toleration of new crypto hardware
  s390/idle: consolidate idle functions and definitions
  s390/nohz: use a per-cpu flag for arch_needs_cpu
  s390/vtime: do not reset idle data on CPU hotplug
  s390/dasd: add support for control unit initiated reconfiguration
  s390/dasd: fix infinite loop during format
  s390/mm: make use of ipte range facility
  s390/setup: correct 4-level kernel page table detection
  s390/topology: call set_sched_topology early
  s390/uprobes: architecture backend for uprobes
  s390/uprobes: common library for kprobes and uprobes
  s390/rwlock: use the interlocked-access facility 1 instructions
  s390/rwlock: improve writer fairness
  s390/rwlock: remove interrupt-enabling rwlock variant.
  s390/mm: remove change bit override support
  ...
@@ -300,6 +300,7 @@ architectures:
 - arm
 - ppc
 - mips
+- s390

 3. Configuring Kprobes
......
@@ -58,6 +58,9 @@ config NO_IOPORT_MAP
 config PCI_QUIRKS
         def_bool n

+config ARCH_SUPPORTS_UPROBES
+        def_bool 64BIT
+
 config S390
         def_bool y
         select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -97,6 +100,7 @@ config S390
         select ARCH_WANT_IPC_PARSE_VERSION
         select BUILDTIME_EXTABLE_SORT
         select CLONE_BACKWARDS2
+        select DYNAMIC_FTRACE if FUNCTION_TRACER
         select GENERIC_CLOCKEVENTS
         select GENERIC_CPU_DEVICES if !SMP
         select GENERIC_FIND_FIRST_BIT
@@ -113,10 +117,11 @@ config S390
         select HAVE_CMPXCHG_LOCAL
         select HAVE_C_RECORDMCOUNT
         select HAVE_DEBUG_KMEMLEAK
-        select HAVE_DYNAMIC_FTRACE
+        select HAVE_DYNAMIC_FTRACE if 64BIT
+        select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
         select HAVE_FTRACE_MCOUNT_RECORD
-        select HAVE_FUNCTION_GRAPH_TRACER
-        select HAVE_FUNCTION_TRACER
+        select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+        select HAVE_FUNCTION_TRACER if 64BIT
         select HAVE_FUTEX_CMPXCHG if FUTEX
         select HAVE_KERNEL_BZIP2
         select HAVE_KERNEL_GZIP
......
@@ -35,13 +35,16 @@ endif
 export LD_BFD

-cflags-$(CONFIG_MARCH_G5)     += -march=g5
-cflags-$(CONFIG_MARCH_Z900)   += -march=z900
-cflags-$(CONFIG_MARCH_Z990)   += -march=z990
-cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
-cflags-$(CONFIG_MARCH_Z10)    += -march=z10
-cflags-$(CONFIG_MARCH_Z196)   += -march=z196
-cflags-$(CONFIG_MARCH_ZEC12)  += -march=zEC12
+mflags-$(CONFIG_MARCH_G5)     := -march=g5
+mflags-$(CONFIG_MARCH_Z900)   := -march=z900
+mflags-$(CONFIG_MARCH_Z990)   := -march=z990
+mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
+mflags-$(CONFIG_MARCH_Z10)    := -march=z10
+mflags-$(CONFIG_MARCH_Z196)   := -march=z196
+mflags-$(CONFIG_MARCH_ZEC12)  := -march=zEC12
+
+aflags-y += $(mflags-y)
+cflags-y += $(mflags-y)

 cflags-$(CONFIG_MARCH_G5_TUNE)     += -mtune=g5
 cflags-$(CONFIG_MARCH_Z900_TUNE)   += -mtune=z900
......
@@ -15,11 +15,13 @@
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 /* Fast-BCR without checkpoint synchronization */
-#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0)
+#define __ASM_BARRIER "bcr 14,0\n"
 #else
-#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0)
+#define __ASM_BARRIER "bcr 15,0\n"
 #endif
+
+#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)

 #define rmb()                           mb()
 #define wmb()                           mb()
 #define read_barrier_depends()          do { } while(0)
......
@@ -8,8 +8,6 @@
 #define _S390_CPUTIME_H

 #include <linux/types.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
 #include <asm/div64.h>
@@ -167,28 +165,8 @@ static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
         return clock;
 }

-struct s390_idle_data {
-        int nohz_delay;
-        unsigned int sequence;
-        unsigned long long idle_count;
-        unsigned long long idle_time;
-        unsigned long long clock_idle_enter;
-        unsigned long long clock_idle_exit;
-        unsigned long long timer_idle_enter;
-        unsigned long long timer_idle_exit;
-};
-
-DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
-
-cputime64_t s390_get_idle_time(int cpu);
-
-#define arch_idle_time(cpu) s390_get_idle_time(cpu)
-
-static inline int s390_nohz_delay(int cpu)
-{
-        return __get_cpu_var(s390_idle).nohz_delay != 0;
-}
-
-#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
+cputime64_t arch_cpu_idle_time(int cpu);
+
+#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)

 #endif /* _S390_CPUTIME_H */
@@ -13,12 +13,13 @@
 #define OPERAND_FPR     0x2     /* Operand printed as %fx */
 #define OPERAND_AR      0x4     /* Operand printed as %ax */
 #define OPERAND_CR      0x8     /* Operand printed as %cx */
-#define OPERAND_DISP    0x10    /* Operand printed as displacement */
-#define OPERAND_BASE    0x20    /* Operand printed as base register */
-#define OPERAND_INDEX   0x40    /* Operand printed as index register */
-#define OPERAND_PCREL   0x80    /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED  0x100   /* Operand printed as signed value */
-#define OPERAND_LENGTH  0x200   /* Operand printed as length (+1) */
+#define OPERAND_VR      0x10    /* Operand printed as %vx */
+#define OPERAND_DISP    0x20    /* Operand printed as displacement */
+#define OPERAND_BASE    0x40    /* Operand printed as base register */
+#define OPERAND_INDEX   0x80    /* Operand printed as index register */
+#define OPERAND_PCREL   0x100   /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED  0x200   /* Operand printed as signed value */
+#define OPERAND_LENGTH  0x400   /* Operand printed as length (+1) */

 struct s390_operand {
......
@@ -102,6 +102,7 @@
 #define HWCAP_S390_ETF3EH       256
 #define HWCAP_S390_HIGH_GPRS    512
 #define HWCAP_S390_TE           1024
+#define HWCAP_S390_VXRS         2048

 /*
  * These are used to set parameters in the core dumps.
@@ -225,6 +226,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk

-void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
+void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);

 #endif
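The new HWCAP bit is what userspace keys off. A minimal check, assuming only the HWCAP_S390_VXRS value defined above (the test program itself is illustrative, not part of this series):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_S390_VXRS
    #define HWCAP_S390_VXRS 2048        /* matches the kernel define above */
    #endif

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("vector extension: %savailable\n",
                   (hwcap & HWCAP_S390_VXRS) ? "" : "not ");
            return 0;
    }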
@@ -4,6 +4,7 @@
 #ifndef __ASSEMBLY__

 extern void _mcount(void);
+extern char ftrace_graph_caller_end;

 struct dyn_arch_ftrace { };
@@ -17,10 +18,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)

 #endif /* __ASSEMBLY__ */

-#ifdef CONFIG_64BIT
-#define MCOUNT_INSN_SIZE  12
-#else
-#define MCOUNT_INSN_SIZE  22
-#endif
+#define MCOUNT_INSN_SIZE  18
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1

 #endif /* _ASM_S390_FTRACE_H */
+/*
+ * Copyright IBM Corp. 2014
+ *
+ * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _S390_IDLE_H
+#define _S390_IDLE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct s390_idle_data {
+        unsigned int sequence;
+        unsigned long long idle_count;
+        unsigned long long idle_time;
+        unsigned long long clock_idle_enter;
+        unsigned long long clock_idle_exit;
+        unsigned long long timer_idle_enter;
+        unsigned long long timer_idle_exit;
+};
+
+extern struct device_attribute dev_attr_idle_count;
+extern struct device_attribute dev_attr_idle_time_us;
+
+#endif /* _S390_IDLE_H */
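The even/odd 'sequence' field enables lock-free readers of the per-cpu idle data. A sketch of the assumed reader pattern (the real reader lives in the new kernel/idle.c; read_idle_time() here is a hypothetical name): writers bump 'sequence' before and after an update, so readers retry while it is odd or changed underneath them.

    static unsigned long long read_idle_time(struct s390_idle_data *idle)
    {
            unsigned long long enter, exit;
            unsigned int seq;

            do {
                    seq = ACCESS_ONCE(idle->sequence);
                    enter = ACCESS_ONCE(idle->clock_idle_enter);
                    exit = ACCESS_ONCE(idle->clock_idle_exit);
            } while ((seq & 1) || ACCESS_ONCE(idle->sequence) != seq);
            return exit - enter;    /* last completed idle period */
    }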
@@ -89,12 +89,12 @@ extern u32 ipl_flags;
 extern u32 dump_prefix_page;

 struct dump_save_areas {
-        struct save_area **areas;
+        struct save_area_ext **areas;
         int count;
 };

 extern struct dump_save_areas dump_save_areas;
-struct save_area *dump_save_area_create(int cpu);
+struct save_area_ext *dump_save_area_create(int cpu);

 extern void do_reipl(void);
 extern void do_halt(void);
......
@@ -51,6 +51,7 @@ enum interruption_class {
         IRQEXT_CMS,
         IRQEXT_CMC,
         IRQEXT_CMR,
+        IRQEXT_FTP,
         IRQIO_CIO,
         IRQIO_QAI,
         IRQIO_DAS,
......
@@ -84,6 +84,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
         unsigned long val, void *data);

+int probe_is_prohibited_opcode(u16 *insn);
+int probe_get_fixup_type(u16 *insn);
+int probe_is_insn_relative_long(u16 *insn);
+
 #define flush_insn_slot(p)      do { } while (0)

 #endif  /* _ASM_S390_KPROBES_H */
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #include <asm/ptrace.h>
 #include <asm/cpu.h>
+#include <asm/types.h>

 #ifdef CONFIG_32BIT
@@ -31,6 +32,11 @@ struct save_area {
         u32     ctrl_regs[16];
 } __packed;

+struct save_area_ext {
+        struct save_area        sa;
+        __vector128             vx_regs[32];
+};
+
 struct _lowcore {
         psw_t   restart_psw;                    /* 0x0000 */
         psw_t   restart_old_psw;                /* 0x0008 */
@@ -183,6 +189,11 @@ struct save_area {
         u64     ctrl_regs[16];
 } __packed;

+struct save_area_ext {
+        struct save_area        sa;
+        __vector128             vx_regs[32];
+};
+
 struct _lowcore {
         __u8    pad_0x0000[0x0014-0x0000];      /* 0x0000 */
         __u32   ipl_parmblock_ptr;              /* 0x0014 */
@@ -310,7 +321,10 @@ struct _lowcore {
         /* Extended facility list */
         __u64   stfle_fac_list[32];             /* 0x0f00 */
-        __u8    pad_0x1000[0x11b8-0x1000];      /* 0x1000 */
+        __u8    pad_0x1000[0x11b0-0x1000];      /* 0x1000 */
+
+        /* Pointer to vector register save area */
+        __u64   vector_save_area_addr;          /* 0x11b0 */

         /* 64 bit extparam used for pfault/diag 250: defined by architecture */
         __u64   ext_params2;                    /* 0x11B8 */
@@ -334,9 +348,10 @@ struct _lowcore {
         /* Transaction abort diagnostic block */
         __u8    pgm_tdb[256];                   /* 0x1800 */
+        __u8    pad_0x1900[0x1c00-0x1900];      /* 0x1900 */

-        /* align to the top of the prefix area */
-        __u8    pad_0x1900[0x2000-0x1900];      /* 0x1900 */
+        /* Software defined save area for vector registers */
+        __u8    vector_save_area[1024];         /* 0x1c00 */
 } __packed;

 #endif /* CONFIG_32BIT */
......
@@ -38,7 +38,7 @@ struct mci {
         __u32 pm :  1;          /* 22 psw program mask and cc validity */
         __u32 ia :  1;          /* 23 psw instruction address validity */
         __u32 fa :  1;          /* 24 failing storage address validity */
-        __u32    :  1;          /* 25 */
+        __u32 vr :  1;          /* 25 vector register validity */
         __u32 ec :  1;          /* 26 external damage code validity */
         __u32 fp :  1;          /* 27 floating point register validity */
         __u32 gr :  1;          /* 28 general register validity */
......
@@ -217,7 +217,6 @@ extern unsigned long MODULES_END;
  */

 /* Hardware bits in the page table entry */
-#define _PAGE_CO        0x100           /* HW Change-bit override */
 #define _PAGE_PROTECT   0x200           /* HW read-only bit  */
 #define _PAGE_INVALID   0x400           /* HW invalid bit    */
 #define _PAGE_LARGE     0x800           /* Bit to mark a large pte */
@@ -234,8 +233,8 @@ extern unsigned long MODULES_END;
 #define __HAVE_ARCH_PTE_SPECIAL

 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK          (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
-                                 _PAGE_DIRTY | _PAGE_YOUNG)
+#define _PAGE_CHG_MASK          (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
+                                 _PAGE_YOUNG)

 /*
  * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
@@ -354,7 +353,6 @@ extern unsigned long MODULES_END;
 #define _REGION3_ENTRY_LARGE    0x400   /* RTTE-format control, large page */
 #define _REGION3_ENTRY_RO       0x200   /* page protection bit */
-#define _REGION3_ENTRY_CO       0x100   /* change-recording override */

 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_BITS     0xfffffffffffffe33UL
@@ -371,7 +369,6 @@ extern unsigned long MODULES_END;
 #define _SEGMENT_ENTRY_YOUNG    0x1000  /* SW segment young bit */
 #define _SEGMENT_ENTRY_SPLIT    0x0800  /* THP splitting bit */
 #define _SEGMENT_ENTRY_LARGE    0x0400  /* STE-format control, large page */
-#define _SEGMENT_ENTRY_CO       0x0100  /* change-recording override */
 #define _SEGMENT_ENTRY_READ     0x0002  /* SW segment read bit */
 #define _SEGMENT_ENTRY_WRITE    0x0001  /* SW segment write bit */
@@ -873,8 +870,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                 pgste = pgste_set_pte(ptep, pgste, entry);
                 pgste_set_unlock(ptep, pgste);
         } else {
-                if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
-                        pte_val(entry) |= _PAGE_CO;
                 *ptep = entry;
         }
 }
@@ -1044,6 +1039,22 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
                 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
 }

+static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
+{
+        unsigned long pto = (unsigned long) ptep;
+
+#ifndef CONFIG_64BIT
+        /* pto in ESA mode must point to the start of the segment table */
+        pto &= 0x7ffffc00;
+#endif
+        /* Invalidate a range of ptes + global TLB flush of the ptes */
+        do {
+                asm volatile(
+                        "       .insn rrf,0xb2210000,%2,%0,%1,0"
+                        : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
+        } while (nr != 255);
+}
+
 static inline void ptep_flush_direct(struct mm_struct *mm,
                                      unsigned long address, pte_t *ptep)
 {
......
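How a caller is expected to use __ptep_ipte_range(): batch the invalidation when the IPTE-range facility (facility bit 13) is installed, and fall back to one IPTE per page otherwise. A sketch loosely following the mm/pageattr.c change in this series (the shape of the helper, not a verbatim copy):

    static void ipte_range(pte_t *pte, unsigned long address, int nr)
    {
            int i;

            if (test_facility(13)) {
                    /* one instruction loop invalidates all nr ptes */
                    __ptep_ipte_range(address, nr - 1, pte);
                    return;
            }
            for (i = 0; i < nr; i++) {
                    __ptep_ipte(address, pte);
                    address += PAGE_SIZE;
                    pte++;
            }
    }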
@@ -13,9 +13,11 @@
 #define CIF_MCCK_PENDING        0       /* machine check handling is pending */
 #define CIF_ASCE                1       /* user asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY          2       /* delay HZ disable for a tick */

 #define _CIF_MCCK_PENDING       (1<<CIF_MCCK_PENDING)
 #define _CIF_ASCE               (1<<CIF_ASCE)
+#define _CIF_NOHZ_DELAY         (1<<CIF_NOHZ_DELAY)

 #ifndef __ASSEMBLY__
@@ -43,6 +45,8 @@ static inline int test_cpu_flag(int flag)
         return !!(S390_lowcore.cpu_flags & (1U << flag));
 }

+#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
+
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
@@ -113,6 +117,7 @@ struct thread_struct {
         int ri_signum;
 #ifdef CONFIG_64BIT
         unsigned char trap_tdb[256];    /* Transaction abort diagnose block */
+        __vector128 *vxrs;              /* Vector register save area */
 #endif
 };
@@ -285,7 +290,12 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
         return (psw.addr - ilc) & mask;
 #endif
 }
+
+/*
+ * Function to stop a processor until the next interrupt occurs
+ */
+void enabled_wait(void);

 /*
  * Function to drop a processor into disabled wait state
  */
......
@@ -161,6 +161,12 @@ static inline long regs_return_value(struct pt_regs *regs)
         return regs->gprs[2];
 }

+static inline void instruction_pointer_set(struct pt_regs *regs,
+                                           unsigned long val)
+{
+        regs->psw.addr = val | PSW_ADDR_AMODE;
+}
+
 int regs_query_register_offset(const char *name);
 const char *regs_query_register_name(unsigned int offset);
 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
......
@@ -55,8 +55,8 @@ extern void detect_memory_memblock(void);
 #define MACHINE_FLAG_LPP        (1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY   (1UL << 14)
 #define MACHINE_FLAG_TE         (1UL << 15)
-#define MACHINE_FLAG_RRBM       (1UL << 16)
 #define MACHINE_FLAG_TLB_LC     (1UL << 17)
+#define MACHINE_FLAG_VX         (1UL << 18)

 #define MACHINE_IS_VM           (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM          (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -78,8 +78,8 @@ extern void detect_memory_memblock(void);
 #define MACHINE_HAS_LPP         (0)
 #define MACHINE_HAS_TOPOLOGY    (0)
 #define MACHINE_HAS_TE          (0)
-#define MACHINE_HAS_RRBM        (0)
 #define MACHINE_HAS_TLB_LC      (0)
+#define MACHINE_HAS_VX          (0)
 #else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE        (1)
 #define MACHINE_HAS_CSP         (1)
@@ -91,8 +91,8 @@ extern void detect_memory_memblock(void);
 #define MACHINE_HAS_LPP         (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
 #define MACHINE_HAS_TOPOLOGY    (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE          (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
-#define MACHINE_HAS_RRBM        (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
 #define MACHINE_HAS_TLB_LC      (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
+#define MACHINE_HAS_VX          (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
 #endif /* CONFIG_64BIT */

 /*
......
@@ -15,6 +15,7 @@
 #define SIGP_SET_ARCHITECTURE         18
 #define SIGP_COND_EMERGENCY_SIGNAL    19
 #define SIGP_SENSE_RUNNING            21
+#define SIGP_STORE_ADDITIONAL_STATUS  23

 /* SIGP condition codes */
 #define SIGP_CC_ORDER_CODE_ACCEPTED 0
@@ -33,9 +34,10 @@
 #ifndef __ASSEMBLY__

-static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
+static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
+                              u32 *status)
 {
-        register unsigned int reg1 asm ("1") = parm;
+        register unsigned long reg1 asm ("1") = parm;
         int cc;

         asm volatile(
......
@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
 extern int smp_store_status(int cpu);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
-extern void smp_yield(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
 static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
-static inline void smp_yield(void) { }
 static inline void smp_fill_possible_mask(void) { }

 #endif /* CONFIG_SMP */
......
@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
  * (the type definitions are in asm/spinlock_types.h)
  */

+void arch_lock_relax(unsigned int cpu);
+
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_relax(arch_spinlock_t *);
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+        arch_lock_relax(lock->lock);
+}
+
 static inline u32 arch_spin_lockval(int cpu)
 {
         return ~cpu;
@@ -64,11 +70,6 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
                      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
 }

-static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
-{
-        return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
-}
-
 static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
         if (!arch_spin_trylock_once(lp))
@@ -91,7 +92,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)

 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-        arch_spin_tryrelease_once(lp);
+        typecheck(unsigned int, lp->lock);
+        asm volatile(
+                __ASM_BARRIER
+                "st     %1,%0\n"
+                : "+Q" (lp->lock)
+                : "d" (0)
+                : "cc", "memory");
 }

 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
@@ -123,13 +130,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
  */
 #define arch_write_can_lock(x)          ((x)->lock == 0)

-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
         unsigned int old = ACCESS_ONCE(rw->lock);
@@ -144,16 +150,82 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
                      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
 }

+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __RAW_OP_OR     "lao"
+#define __RAW_OP_AND    "lan"
+#define __RAW_OP_ADD    "laa"
+
+#define __RAW_LOCK(ptr, op_val, op_string)              \
+({                                                      \
+        unsigned int old_val;                           \
+                                                        \
+        typecheck(unsigned int *, ptr);                 \
+        asm volatile(                                   \
+                op_string "     %0,%2,%1\n"             \
+                "bcr    14,0\n"                         \
+                : "=d" (old_val), "+Q" (*ptr)           \
+                : "d" (op_val)                          \
+                : "cc", "memory");                      \
+        old_val;                                        \
+})
+
+#define __RAW_UNLOCK(ptr, op_val, op_string)            \
+({                                                      \
+        unsigned int old_val;                           \
+                                                        \
+        typecheck(unsigned int *, ptr);                 \
+        asm volatile(                                   \
+                "bcr    14,0\n"                         \
+                op_string "     %0,%2,%1\n"             \
+                : "=d" (old_val), "+Q" (*ptr)           \
+                : "d" (op_val)                          \
+                : "cc", "memory");                      \
+        old_val;                                        \
+})
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
+        unsigned int old;
+
+        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
+        if ((int) old < 0)
+                _raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+        unsigned int old;
+
+        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+        if (old != 0)
+                _raw_write_lock_wait(rw, old);
+        rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+        rw->owner = 0;
+        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
         if (!arch_read_trylock_once(rw))
                 _raw_read_lock_wait(rw);
 }

-static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
-{
-        if (!arch_read_trylock_once(rw))
-                _raw_read_lock_wait_flags(rw, flags);
-}
-
 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -169,19 +241,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         if (!arch_write_trylock_once(rw))
                 _raw_write_lock_wait(rw);
-}
-
-static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
-{
-        if (!arch_write_trylock_once(rw))
-                _raw_write_lock_wait_flags(rw, flags);
+        rw->owner = SPINLOCK_LOCKVAL;
 }

 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
+        typecheck(unsigned int, rw->lock);
+
+        rw->owner = 0;
+        asm volatile(
+                __ASM_BARRIER
+                "st     %1,%0\n"
+                : "+Q" (rw->lock)
+                : "d" (0)
+                : "cc", "memory");
 }

+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         if (!arch_read_trylock_once(rw))
@@ -191,12 +268,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)

 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-        if (!arch_write_trylock_once(rw))
-                return _raw_write_trylock_retry(rw);
+        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+                return 0;
+        rw->owner = SPINLOCK_LOCKVAL;
         return 1;
 }

-#define arch_read_relax(lock)   cpu_relax()
-#define arch_write_relax(lock)  cpu_relax()
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+        arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+        arch_lock_relax(rw->owner);
+}

 #endif /* __ASM_SPINLOCK_H */
@@ -13,6 +13,7 @@ typedef struct {

 typedef struct {
         unsigned int lock;
+        unsigned int owner;
 } arch_rwlock_t;

 #define __ARCH_RW_LOCK_UNLOCKED         { 0 }
......
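The rwlock word layout the code above relies on: bit 31 (0x80000000) is the writer bit, the lower bits count readers, and the old value returned by __RAW_LOCK tells the caller whether it must wait (negative means a writer holds the lock). The new 'owner' field remembers the holder's CPU so spinners can yield to it via arch_lock_relax(). A standalone semantics sketch (plain C, deliberately not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lock = 0;

            lock += 1;                  /* arch_read_lock: "laa" adds 1 */
            printf("reader in:  %08x\n", lock);
            lock -= 1;                  /* arch_read_unlock: adds -1 */
            lock |= 0x80000000;         /* arch_write_lock: "lao" sets bit 31 */
            printf("writer in:  %08x\n", lock);
            lock &= 0x7fffffff;         /* arch_write_unlock: "lan" clears it */
            printf("unlocked:   %08x\n", lock);
            return 0;
    }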
@@ -103,6 +103,61 @@ static inline void restore_fp_regs(freg_t *fprs)
         asm volatile("ld 15,%0" : : "Q" (fprs[15]));
 }

+static inline void save_vx_regs(__vector128 *vxrs)
+{
+        typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
+
+        asm volatile(
+                "       la      1,%0\n"
+                "       .word   0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
+                "       .word   0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
+                : "=Q" (*(addrtype *) vxrs) : : "1");
+}
+
+static inline void save_vx_regs_safe(__vector128 *vxrs)
+{
+        unsigned long cr0, flags;
+
+        flags = arch_local_irq_save();
+        __ctl_store(cr0, 0, 0);
+        __ctl_set_bit(0, 17);
+        __ctl_set_bit(0, 18);
+        save_vx_regs(vxrs);
+        __ctl_load(cr0, 0, 0);
+        arch_local_irq_restore(flags);
+}
+
+static inline void restore_vx_regs(__vector128 *vxrs)
+{
+        typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
+
+        asm volatile(
+                "       la      1,%0\n"
+                "       .word   0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
+                "       .word   0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
+                : : "Q" (*(addrtype *) vxrs) : "1");
+}
+
+static inline void save_fp_vx_regs(struct task_struct *task)
+{
+#ifdef CONFIG_64BIT
+        if (task->thread.vxrs)
+                save_vx_regs(task->thread.vxrs);
+        else
+#endif
+        save_fp_regs(task->thread.fp_regs.fprs);
+}
+
+static inline void restore_fp_vx_regs(struct task_struct *task)
+{
+#ifdef CONFIG_64BIT
+        if (task->thread.vxrs)
+                restore_vx_regs(task->thread.vxrs);
+        else
+#endif
+        restore_fp_regs(task->thread.fp_regs.fprs);
+}
+
 static inline void save_access_regs(unsigned int *acrs)
 {
         typedef struct { int _[NUM_ACRS]; } acrstype;
@@ -120,16 +175,16 @@ static inline void restore_access_regs(unsigned int *acrs)
 #define switch_to(prev,next,last) do {                                  \
         if (prev->mm) {                                                 \
                 save_fp_ctl(&prev->thread.fp_regs.fpc);                 \
-                save_fp_regs(prev->thread.fp_regs.fprs);                \
+                save_fp_vx_regs(prev);                                  \
                 save_access_regs(&prev->thread.acrs[0]);                \
                 save_ri_cb(prev->thread.ri_cb);                         \
         }                                                               \
         if (next->mm) {                                                 \
+                update_cr_regs(next);                                   \
                 restore_fp_ctl(&next->thread.fp_regs.fpc);              \
-                restore_fp_regs(next->thread.fp_regs.fprs);             \
+                restore_fp_vx_regs(next);                               \
                 restore_access_regs(&next->thread.acrs[0]);             \
                 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
-                update_cr_regs(next);                                   \
         }                                                               \
         prev = __switch_to(prev,next);                                  \
 } while (0)
......
@@ -84,11 +84,13 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_AUDIT       4       /* syscall auditing active */
 #define TIF_SECCOMP             5       /* secure computing */
 #define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
+#define TIF_UPROBE              7       /* breakpointed or single-stepping */
 #define TIF_31BIT               16      /* 32bit process */
 #define TIF_MEMDIE              17      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK     18      /* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP         19      /* This task is single stepped */
 #define TIF_BLOCK_STEP          20      /* This task is block stepped */
+#define TIF_UPROBE_SINGLESTEP   21      /* This task is uprobe single stepped */

 #define _TIF_NOTIFY_RESUME      (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING         (1<<TIF_SIGPENDING)
@@ -97,6 +99,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT      (1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP            (1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_UPROBE             (1<<TIF_UPROBE)
 #define _TIF_31BIT              (1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP        (1<<TIF_SINGLE_STEP)
......
+/*
+ * User-space Probes (UProbes) for s390
+ *
+ * Copyright IBM Corp. 2014
+ * Author(s): Jan Willeke,
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <linux/notifier.h>
+
+typedef u16 uprobe_opcode_t;
+
+#define UPROBE_XOL_SLOT_BYTES   256 /* cache aligned */
+#define UPROBE_SWBP_INSN        0x0002
+#define UPROBE_SWBP_INSN_SIZE   2
+
+struct arch_uprobe {
+        union{
+                uprobe_opcode_t insn[3];
+                uprobe_opcode_t ixol[3];
+        };
+        unsigned int saved_per : 1;
+        unsigned int saved_int_code;
+};
+
+struct arch_uprobe_task {
+};
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
+                             unsigned long addr);
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
+                                 void *data);
+void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
+unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
+                                                struct pt_regs *regs);
+#endif  /* _ASM_UPROBES_H */
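With this backend in place, the generic uprobes API becomes usable on s390. A hedged sketch of a kernel-side consumer (uprobe_register() is the existing common-code entry point; the handler name and the inode/offset values are placeholders):

    #include <linux/uprobes.h>

    static int my_uprobe_handler(struct uprobe_consumer *uc,
                                 struct pt_regs *regs)
    {
            /* runs when a task hits the 2-byte 0x0002 breakpoint */
            return 0;       /* 0 = keep the probe armed */
    }

    static struct uprobe_consumer my_consumer = {
            .handler = my_uprobe_handler,
    };

    /* Arming: uprobe_register(inode, offset, &my_consumer) plants
     * UPROBE_SWBP_INSN at the given file offset of the executable. */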
@@ -22,13 +22,17 @@ struct vdso_data {
         __u64 xtime_tod_stamp;          /* TOD clock for xtime          0x08 */
         __u64 xtime_clock_sec;          /* Kernel time                  0x10 */
         __u64 xtime_clock_nsec;         /*                              0x18 */
-        __u64 wtom_clock_sec;           /* Wall to monotonic clock      0x20 */
-        __u64 wtom_clock_nsec;          /*                              0x28 */
-        __u32 tz_minuteswest;           /* Minutes west of Greenwich    0x30 */
-        __u32 tz_dsttime;               /* Type of dst correction       0x34 */
-        __u32 ectg_available;           /* ECTG instruction present     0x38 */
-        __u32 tk_mult;                  /* Mult. used for xtime_nsec    0x3c */
-        __u32 tk_shift;                 /* Shift used for xtime_nsec    0x40 */
+        __u64 xtime_coarse_sec;         /* Coarse kernel time           0x20 */
+        __u64 xtime_coarse_nsec;        /*                              0x28 */
+        __u64 wtom_clock_sec;           /* Wall to monotonic clock      0x30 */
+        __u64 wtom_clock_nsec;          /*                              0x38 */
+        __u64 wtom_coarse_sec;          /* Coarse wall to monotonic     0x40 */
+        __u64 wtom_coarse_nsec;         /*                              0x48 */
+        __u32 tz_minuteswest;           /* Minutes west of Greenwich    0x50 */
+        __u32 tz_dsttime;               /* Type of dst correction       0x54 */
+        __u32 ectg_available;           /* ECTG instruction present     0x58 */
+        __u32 tk_mult;                  /* Mult. used for xtime_nsec    0x5c */
+        __u32 tk_shift;                 /* Shift used for xtime_nsec    0x60 */
 };

 struct vdso_per_cpu_data {
......
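What the new xtime_coarse/wtom_coarse fields serve: CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE can now be answered from the vdso without a system call. Userspace usage is unchanged; a minimal example:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            /* tick-granularity time, served from the vdso data page */
            clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
            printf("%lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
            return 0;
    }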
@@ -28,6 +28,4 @@ extern int del_virt_timer(struct vtimer_list *timer);
 extern void init_cpu_vtimer(void);
 extern void vtime_init(void);

-extern void vtime_stop_cpu(void);
-
 #endif /* _ASM_S390_TIMER_H */
@@ -7,10 +7,14 @@
 #define _ASM_S390_SIGCONTEXT_H

 #include <linux/compiler.h>
+#include <linux/types.h>

 #define __NUM_GPRS 16
 #define __NUM_FPRS 16
 #define __NUM_ACRS 16
+#define __NUM_VXRS 32
+#define __NUM_VXRS_LOW 16
+#define __NUM_VXRS_HIGH 16

 #ifndef __s390x__
@@ -59,6 +63,16 @@ typedef struct
         _s390_fp_regs fpregs;
 } _sigregs;

+typedef struct
+{
+#ifndef __s390x__
+        unsigned long gprs_high[__NUM_GPRS];
+#endif
+        unsigned long long vxrs_low[__NUM_VXRS_LOW];
+        __vector128 vxrs_high[__NUM_VXRS_HIGH];
+        unsigned char __reserved[128];
+} _sigregs_ext;
+
 struct sigcontext
 {
         unsigned long   oldmask[_SIGCONTEXT_NSIG_WORDS];
......
@@ -17,6 +17,10 @@
 typedef unsigned long addr_t;
 typedef __signed__ long saddr_t;

+typedef struct {
+        __u32 u[4];
+} __vector128;
+
 #endif /* __ASSEMBLY__ */

 #endif /* _UAPI_S390_TYPES_H */
@@ -7,10 +7,15 @@
 #ifndef _ASM_S390_UCONTEXT_H
 #define _ASM_S390_UCONTEXT_H

-#define UC_EXTENDED     0x00000001
-
-#ifndef __s390x__
+#define UC_GPRS_HIGH    1       /* uc_mcontext_ext has valid high gprs */
+#define UC_VXRS         2       /* uc_mcontext_ext has valid vector regs */

+/*
+ * The struct ucontext_extended describes how the registers are stored
+ * on a rt signal frame. Please note that the structure is not fixed,
+ * if new CPU registers are added to the user state the size of the
+ * struct ucontext_extended will increase.
+ */
 struct ucontext_extended {
         unsigned long     uc_flags;
         struct ucontext  *uc_link;
@@ -19,11 +24,9 @@ struct ucontext_extended {
         sigset_t          uc_sigmask;
         /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
         unsigned char     __unused[128 - sizeof(sigset_t)];
-        unsigned long     uc_gprs_high[16];
+        _sigregs_ext      uc_mcontext_ext;
 };
-#endif

 struct ucontext {
         unsigned long     uc_flags;
         struct ucontext  *uc_link;
......
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o         += -DUTS_MACHINE='"$(UTS_MACHINE)"'

 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w

-obj-y   := traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y   := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y   += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y   += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
 obj-y   += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
@@ -52,11 +52,9 @@ obj-$(CONFIG_COMPAT)            += compat_wrapper.o $(compat-obj-y)

 obj-$(CONFIG_STACKTRACE)        += stacktrace.o
 obj-$(CONFIG_KPROBES)           += kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER)   += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
-obj-$(CONFIG_DYNAMIC_FTRACE)    += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_FTRACE_SYSCALLS)   += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER)   += mcount.o ftrace.o
 obj-$(CONFIG_CRASH_DUMP)        += crash_dump.o
+obj-$(CONFIG_UPROBES)           += uprobes.o

 ifdef CONFIG_64BIT
 obj-$(CONFIG_PERF_EVENTS)       += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
......
@@ -9,7 +9,7 @@
 #include <linux/kbuild.h>
 #include <linux/kvm_host.h>
 #include <linux/sched.h>
-#include <asm/cputime.h>
+#include <asm/idle.h>
 #include <asm/vdso.h>
 #include <asm/pgtable.h>
@@ -62,8 +62,12 @@ int main(void)
         DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
         DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
         DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
+        DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
+        DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
         DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
         DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+        DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec));
+        DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec));
         DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
         DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
         DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
@@ -73,8 +77,11 @@ int main(void)
         /* constants used by the vdso */
         DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
         DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+        DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
+        DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
         DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
         DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+        DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
         BLANK();
         /* idle data offsets */
         DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
......
@@ -50,6 +50,14 @@ typedef struct
         _s390_fp_regs32 fpregs;
 } _sigregs32;

+typedef struct
+{
+        __u32 gprs_high[__NUM_GPRS];
+        __u64 vxrs_low[__NUM_VXRS_LOW];
+        __vector128 vxrs_high[__NUM_VXRS_HIGH];
+        __u8 __reserved[128];
+} _sigregs_ext32;
+
 #define _SIGCONTEXT_NSIG32      64
 #define _SIGCONTEXT_NSIG_BPW32  32
 #define __SIGNAL_FRAMESIZE32    96
@@ -72,6 +80,7 @@ struct ucontext32 {
         compat_sigset_t         uc_sigmask;
         /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
         unsigned char           __unused[128 - sizeof(compat_sigset_t)];
+        _sigregs_ext32          uc_mcontext_ext;
 };

 struct stat64_emu31;
......
@@ -36,17 +36,16 @@ typedef struct
         struct sigcontext32 sc;
         _sigregs32 sregs;
         int signo;
-        __u32 gprs_high[NUM_GPRS];
-        __u8 retcode[S390_SYSCALL_SIZE];
+        _sigregs_ext32 sregs_ext;
+        __u16 svc_insn;         /* Offset of svc_insn is NOT fixed! */
 } sigframe32;

 typedef struct
 {
         __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
-        __u8 retcode[S390_SYSCALL_SIZE];
+        __u16 svc_insn;
         compat_siginfo_t info;
         struct ucontext32 uc;
-        __u32 gprs_high[NUM_GPRS];
 } rt_sigframe32;

 int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
@@ -151,6 +150,38 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
         return err ? -EFAULT : 0;
 }

+/* Store registers needed to create the signal frame */
+static void store_sigregs(void)
+{
+        int i;
+
+        save_access_regs(current->thread.acrs);
+        save_fp_ctl(&current->thread.fp_regs.fpc);
+        if (current->thread.vxrs) {
+                save_vx_regs(current->thread.vxrs);
+                for (i = 0; i < __NUM_FPRS; i++)
+                        current->thread.fp_regs.fprs[i] =
+                                *(freg_t *)(current->thread.vxrs + i);
+        } else
+                save_fp_regs(current->thread.fp_regs.fprs);
+}
+
+/* Load registers after signal return */
+static void load_sigregs(void)
+{
+        int i;
+
+        restore_access_regs(current->thread.acrs);
+        /* restore_fp_ctl is done in restore_sigregs */
+        if (current->thread.vxrs) {
+                for (i = 0; i < __NUM_FPRS; i++)
+                        *(freg_t *)(current->thread.vxrs + i) =
+                                current->thread.fp_regs.fprs[i];
+                restore_vx_regs(current->thread.vxrs);
+        } else
+                restore_fp_regs(current->thread.fp_regs.fprs);
+}
+
 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 {
         _sigregs32 user_sregs;
@@ -163,11 +194,8 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
                 (__u32)(regs->psw.mask & PSW_MASK_BA);
         for (i = 0; i < NUM_GPRS; i++)
                 user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
-        save_access_regs(current->thread.acrs);
         memcpy(&user_sregs.regs.acrs, current->thread.acrs,
                sizeof(user_sregs.regs.acrs));
-        save_fp_ctl(&current->thread.fp_regs.fpc);
-        save_fp_regs(current->thread.fp_regs.fprs);
         memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
                sizeof(user_sregs.fpregs));
         if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
@@ -207,37 +235,67 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
                 regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
         memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
                sizeof(current->thread.acrs));
-        restore_access_regs(current->thread.acrs);
         memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
                sizeof(current->thread.fp_regs));
-        restore_fp_regs(current->thread.fp_regs.fprs);
         clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
         return 0;
 }

-static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
+static int save_sigregs_ext32(struct pt_regs *regs,
+                              _sigregs_ext32 __user *sregs_ext)
 {
         __u32 gprs_high[NUM_GPRS];
+        __u64 vxrs[__NUM_VXRS_LOW];
         int i;

+        /* Save high gprs to signal stack */
         for (i = 0; i < NUM_GPRS; i++)
                 gprs_high[i] = regs->gprs[i] >> 32;
-        if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
+        if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
+                           sizeof(sregs_ext->gprs_high)))
                 return -EFAULT;
+
+        /* Save vector registers to signal stack */
+        if (current->thread.vxrs) {
+                for (i = 0; i < __NUM_VXRS_LOW; i++)
+                        vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
+                if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
+                                   sizeof(sregs_ext->vxrs_low)) ||
+                    __copy_to_user(&sregs_ext->vxrs_high,
+                                   current->thread.vxrs + __NUM_VXRS_LOW,
+                                   sizeof(sregs_ext->vxrs_high)))
+                        return -EFAULT;
+        }
         return 0;
 }

-static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
+static int restore_sigregs_ext32(struct pt_regs *regs,
+                                 _sigregs_ext32 __user *sregs_ext)
 {
         __u32 gprs_high[NUM_GPRS];
+        __u64 vxrs[__NUM_VXRS_LOW];
         int i;

-        if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
+        /* Restore high gprs from signal stack */
+        if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+                             sizeof(&sregs_ext->gprs_high)))
                 return -EFAULT;
         for (i = 0; i < NUM_GPRS; i++)
                 *(__u32 *)&regs->gprs[i] = gprs_high[i];
+
+        /* Restore vector registers from signal stack */
+        if (current->thread.vxrs) {
+                if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
+                                     sizeof(sregs_ext->vxrs_low)) ||
+                    __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
+                                     &sregs_ext->vxrs_high,
+                                     sizeof(sregs_ext->vxrs_high)))
+                        return -EFAULT;
+                for (i = 0; i < __NUM_VXRS_LOW; i++)
+                        *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
+        }
         return 0;
 }

@@ -252,8 +310,9 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
         set_current_blocked(&set);
         if (restore_sigregs32(regs, &frame->sregs))
                 goto badframe;
-        if (restore_sigregs_gprs_high(regs, frame->gprs_high))
+        if (restore_sigregs_ext32(regs, &frame->sregs_ext))
                 goto badframe;
+        load_sigregs();
         return regs->gprs[2];
 badframe:
         force_sig(SIGSEGV, current);
@@ -269,12 +328,13 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
         if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                 goto badframe;
         set_current_blocked(&set);
+        if (compat_restore_altstack(&frame->uc.uc_stack))
+                goto badframe;
         if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
                 goto badframe;
-        if (restore_sigregs_gprs_high(regs, frame->gprs_high))
+        if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
                 goto badframe;
-        if (compat_restore_altstack(&frame->uc.uc_stack))
-                goto badframe;
+        load_sigregs();
         return regs->gprs[2];
 badframe:
         force_sig(SIGSEGV, current);
@@ -324,37 +384,64 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
                          struct pt_regs *regs)
 {
         int sig = ksig->sig;
-        sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(sigframe32));
+        sigframe32 __user *frame;
+        struct sigcontext32 sc;
+        unsigned long restorer;
+        size_t frame_size;

+        /*
+         * gprs_high are always present for 31-bit compat tasks.
+         * The space for vector registers is only allocated if
+         * the machine supports it
+         */
+        frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
+        if (!MACHINE_HAS_VX)
+                frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
+                              sizeof(frame->sregs_ext.vxrs_high);
+        frame = get_sigframe(&ksig->ka, regs, frame_size);
         if (frame == (void __user *) -1UL)
                 return -EFAULT;

-        if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
+        /* Set up backchain. */
+        if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
+                return -EFAULT;
+
+        /* Create struct sigcontext32 on the signal stack */
+        memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+        sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
+        if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
                 return -EFAULT;

+        /* Store registers needed to create the signal frame */
+        store_sigregs();
+
+        /* Create _sigregs32 on the signal stack */
         if (save_sigregs32(regs, &frame->sregs))
                 return -EFAULT;
-        if (save_sigregs_gprs_high(regs, frame->gprs_high))
+
+        /* Place signal number on stack to allow backtrace from handler.  */
+        if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
                 return -EFAULT;
-        if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs))
+
+        /* Create _sigregs_ext32 on the signal stack */
+        if (save_sigregs_ext32(regs, &frame->sregs_ext))
                 return -EFAULT;

         /* Set up to return from userspace.  If provided, use a stub
            already in userspace.  */
         if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-                regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
+                restorer = (unsigned long __force)
+                        ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
         } else {
-                regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
-                if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
-                               (u16 __force __user *)(frame->retcode)))
+                /* Signal frames without vectors registers are short ! */
+                __u16 __user *svc = (void *) frame + frame_size - 2;
+                if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
                         return -EFAULT;
+                restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
         }

-        /* Set up backchain. */
-        if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
-                return -EFAULT;
-
         /* Set up registers for signal handler */
+        regs->gprs[14] = restorer;
         regs->gprs[15] = (__force __u64) frame;
         /* Force 31 bit amode and default user address space control. */
         regs->psw.mask = PSW_MASK_BA |
@@ -375,50 +462,69 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
                 regs->gprs[6] = task_thread_info(current)->last_break;
         }

-        /* Place signal number on stack to allow backtrace from handler.  */
-        if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
-                return -EFAULT;
         return 0;
 }

 static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs) struct pt_regs *regs)
{ {
int err = 0; rt_sigframe32 __user *frame;
rt_sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe32)); unsigned long restorer;
size_t frame_size;
u32 uc_flags;
frame_size = sizeof(*frame) -
sizeof(frame->uc.uc_mcontext_ext.__reserved);
/*
* gprs_high are always present for 31-bit compat tasks.
* The space for vector registers is only allocated if
* the machine supports it
*/
uc_flags = UC_GPRS_HIGH;
if (MACHINE_HAS_VX) {
if (current->thread.vxrs)
uc_flags |= UC_VXRS;
} else
frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL) if (frame == (void __user *) -1UL)
return -EFAULT; return -EFAULT;
if (copy_siginfo_to_user32(&frame->info, &ksig->info)) /* Set up backchain. */
return -EFAULT; if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
/* Create the ucontext. */
err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
err |= save_sigregs_gprs_high(regs, frame->gprs_high);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT; return -EFAULT;
/* Set up to return from userspace. If provided, use a stub /* Set up to return from userspace. If provided, use a stub
already in userspace. */ already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) { if (ksig->ka.sa.sa_flags & SA_RESTORER) {
regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; restorer = (unsigned long __force)
ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else { } else {
regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; __u16 __user *svc = &frame->svc_insn;
if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
(u16 __force __user *)(frame->retcode)))
return -EFAULT; return -EFAULT;
restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
} }
/* Set up backchain. */ /* Create siginfo on the signal stack */
if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) if (copy_siginfo_to_user32(&frame->info, &ksig->info))
return -EFAULT;
/* Store registers needed to create the signal frame */
store_sigregs();
/* Create ucontext on the signal stack. */
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT; return -EFAULT;
/* Set up registers for signal handler */ /* Set up registers for signal handler */
regs->gprs[14] = restorer;
regs->gprs[15] = (__force __u64) frame; regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */ /* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA | regs->psw.mask = PSW_MASK_BA |
......
...@@ -46,9 +46,9 @@ struct dump_save_areas dump_save_areas; ...@@ -46,9 +46,9 @@ struct dump_save_areas dump_save_areas;
/* /*
* Allocate and add a save area for a CPU * Allocate and add a save area for a CPU
*/ */
struct save_area *dump_save_area_create(int cpu) struct save_area_ext *dump_save_area_create(int cpu)
{ {
struct save_area **save_areas, *save_area; struct save_area_ext **save_areas, *save_area;
save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
if (!save_area) if (!save_area)
...@@ -385,10 +385,46 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa) ...@@ -385,10 +385,46 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa)
sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME); sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
} }
/*
* Initialize vxrs high note (full 128 bit VX registers 16-31)
*/
static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
{
return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME);
}
/*
* Initialize vxrs low note (lower halves of VX registers 0-15)
*/
static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
{
Elf64_Nhdr *note;
u64 len;
int i;
note = (Elf64_Nhdr *)ptr;
note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1;
note->n_descsz = 16 * 8;
note->n_type = NT_S390_VXRS_LOW;
len = sizeof(Elf64_Nhdr);
memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz);
len = roundup(len + note->n_namesz, 4);
ptr += len;
/* Copy lower halves of SIMD registers 0-15 */
for (i = 0; i < 16; i++) {
memcpy(ptr, &vx_regs[i], 8);
ptr += 8;
}
return ptr;
}
/* /*
* Fill ELF notes for one CPU with save area registers * Fill ELF notes for one CPU with save area registers
*/ */
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa) void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
{ {
ptr = nt_prstatus(ptr, sa); ptr = nt_prstatus(ptr, sa);
ptr = nt_fpregset(ptr, sa); ptr = nt_fpregset(ptr, sa);
...@@ -397,6 +433,10 @@ void *fill_cpu_elf_notes(void *ptr, struct save_area *sa) ...@@ -397,6 +433,10 @@ void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
ptr = nt_s390_tod_preg(ptr, sa); ptr = nt_s390_tod_preg(ptr, sa);
ptr = nt_s390_ctrs(ptr, sa); ptr = nt_s390_ctrs(ptr, sa);
ptr = nt_s390_prefix(ptr, sa); ptr = nt_s390_prefix(ptr, sa);
if (MACHINE_HAS_VX && vx_regs) {
ptr = nt_s390_vx_low(ptr, vx_regs);
ptr = nt_s390_vx_high(ptr, vx_regs);
}
return ptr; return ptr;
} }
...@@ -484,7 +524,7 @@ static int get_cpu_cnt(void) ...@@ -484,7 +524,7 @@ static int get_cpu_cnt(void)
int i, cpus = 0; int i, cpus = 0;
for (i = 0; i < dump_save_areas.count; i++) { for (i = 0; i < dump_save_areas.count; i++) {
if (dump_save_areas.areas[i]->pref_reg == 0) if (dump_save_areas.areas[i]->sa.pref_reg == 0)
continue; continue;
cpus++; cpus++;
} }
...@@ -530,17 +570,17 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset) ...@@ -530,17 +570,17 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
*/ */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{ {
struct save_area *sa; struct save_area_ext *sa_ext;
void *ptr_start = ptr; void *ptr_start = ptr;
int i; int i;
ptr = nt_prpsinfo(ptr); ptr = nt_prpsinfo(ptr);
for (i = 0; i < dump_save_areas.count; i++) { for (i = 0; i < dump_save_areas.count; i++) {
sa = dump_save_areas.areas[i]; sa_ext = dump_save_areas.areas[i];
if (sa->pref_reg == 0) if (sa_ext->sa.pref_reg == 0)
continue; continue;
ptr = fill_cpu_elf_notes(ptr, sa); ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
} }
ptr = nt_vmcoreinfo(ptr); ptr = nt_vmcoreinfo(ptr);
memset(phdr, 0, sizeof(*phdr)); memset(phdr, 0, sizeof(*phdr));
...@@ -581,7 +621,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) ...@@ -581,7 +621,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
mem_chunk_cnt = get_mem_chunk_cnt(); mem_chunk_cnt = get_mem_chunk_cnt();
alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
mem_chunk_cnt * sizeof(Elf64_Phdr); mem_chunk_cnt * sizeof(Elf64_Phdr);
hdr = kzalloc_panic(alloc_size); hdr = kzalloc_panic(alloc_size);
/* Init elf header */ /* Init elf header */
......
...@@ -60,6 +60,11 @@ enum { ...@@ -60,6 +60,11 @@ enum {
A_28, /* Access reg. starting at position 28 */ A_28, /* Access reg. starting at position 28 */
C_8, /* Control reg. starting at position 8 */ C_8, /* Control reg. starting at position 8 */
C_12, /* Control reg. starting at position 12 */ C_12, /* Control reg. starting at position 12 */
V_8, /* Vector reg. starting at position 8, extension bit at 36 */
V_12, /* Vector reg. starting at position 12, extension bit at 37 */
V_16, /* Vector reg. starting at position 16, extension bit at 38 */
V_32, /* Vector reg. starting at position 32, extension bit at 39 */
W_12, /* Vector reg. at bit 12, extension at bit 37, used as index */
B_16, /* Base register starting at position 16 */ B_16, /* Base register starting at position 16 */
B_32, /* Base register starting at position 32 */ B_32, /* Base register starting at position 32 */
X_12, /* Index register starting at position 12 */ X_12, /* Index register starting at position 12 */
...@@ -82,6 +87,8 @@ enum { ...@@ -82,6 +87,8 @@ enum {
U8_24, /* 8 bit unsigned value starting at 24 */ U8_24, /* 8 bit unsigned value starting at 24 */
U8_32, /* 8 bit unsigned value starting at 32 */ U8_32, /* 8 bit unsigned value starting at 32 */
I8_8, /* 8 bit signed value starting at 8 */ I8_8, /* 8 bit signed value starting at 8 */
I8_16, /* 8 bit signed value starting at 16 */
I8_24, /* 8 bit signed value starting at 24 */
I8_32, /* 8 bit signed value starting at 32 */ I8_32, /* 8 bit signed value starting at 32 */
J12_12, /* PC relative offset at 12 */ J12_12, /* PC relative offset at 12 */
I16_16, /* 16 bit signed value starting at 16 */ I16_16, /* 16 bit signed value starting at 16 */
...@@ -96,6 +103,9 @@ enum { ...@@ -96,6 +103,9 @@ enum {
U32_16, /* 32 bit unsigned value starting at 16 */ U32_16, /* 32 bit unsigned value starting at 16 */
M_16, /* 4 bit optional mask starting at 16 */ M_16, /* 4 bit optional mask starting at 16 */
M_20, /* 4 bit optional mask starting at 20 */ M_20, /* 4 bit optional mask starting at 20 */
M_24, /* 4 bit optional mask starting at 24 */
M_28, /* 4 bit optional mask starting at 28 */
M_32, /* 4 bit optional mask starting at 32 */
RO_28, /* optional GPR starting at position 28 */ RO_28, /* optional GPR starting at position 28 */
}; };
...@@ -130,7 +140,7 @@ enum { ...@@ -130,7 +140,7 @@ enum {
INSTR_RSY_RDRM, INSTR_RSY_RDRM,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
INSTR_RS_RURD, INSTR_RS_RURD,
INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
INSTR_RXF_FRRDF, INSTR_RXF_FRRDF,
INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
...@@ -143,6 +153,17 @@ enum { ...@@ -143,6 +153,17 @@ enum {
INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
INSTR_S_00, INSTR_S_RD, INSTR_S_00, INSTR_S_RD,
INSTR_VRI_V0IM, INSTR_VRI_V0I0, INSTR_VRI_V0IIM, INSTR_VRI_VVIM,
INSTR_VRI_VVV0IM, INSTR_VRI_VVV0I0, INSTR_VRI_VVIMM,
INSTR_VRR_VV00MMM, INSTR_VRR_VV000MM, INSTR_VRR_VV0000M,
INSTR_VRR_VV00000, INSTR_VRR_VVV0M0M, INSTR_VRR_VV00M0M,
INSTR_VRR_VVV000M, INSTR_VRR_VVV000V, INSTR_VRR_VVV0000,
INSTR_VRR_VVV0MMM, INSTR_VRR_VVV00MM, INSTR_VRR_VVVMM0V,
INSTR_VRR_VVVM0MV, INSTR_VRR_VVVM00V, INSTR_VRR_VRR0000,
INSTR_VRS_VVRDM, INSTR_VRS_VVRD0, INSTR_VRS_VRRDM, INSTR_VRS_VRRD0,
INSTR_VRS_RVRDM,
INSTR_VRV_VVRDM, INSTR_VRV_VWRDM,
INSTR_VRX_VRRDM, INSTR_VRX_VRRD0,
}; };
static const struct s390_operand operands[] = static const struct s390_operand operands[] =
...@@ -168,6 +189,11 @@ static const struct s390_operand operands[] = ...@@ -168,6 +189,11 @@ static const struct s390_operand operands[] =
[A_28] = { 4, 28, OPERAND_AR }, [A_28] = { 4, 28, OPERAND_AR },
[C_8] = { 4, 8, OPERAND_CR }, [C_8] = { 4, 8, OPERAND_CR },
[C_12] = { 4, 12, OPERAND_CR }, [C_12] = { 4, 12, OPERAND_CR },
[V_8] = { 4, 8, OPERAND_VR },
[V_12] = { 4, 12, OPERAND_VR },
[V_16] = { 4, 16, OPERAND_VR },
[V_32] = { 4, 32, OPERAND_VR },
[W_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
[B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
[B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
[X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
...@@ -190,6 +216,11 @@ static const struct s390_operand operands[] = ...@@ -190,6 +216,11 @@ static const struct s390_operand operands[] =
[U8_24] = { 8, 24, 0 }, [U8_24] = { 8, 24, 0 },
[U8_32] = { 8, 32, 0 }, [U8_32] = { 8, 32, 0 },
[J12_12] = { 12, 12, OPERAND_PCREL }, [J12_12] = { 12, 12, OPERAND_PCREL },
[I8_8] = { 8, 8, OPERAND_SIGNED },
[I8_16] = { 8, 16, OPERAND_SIGNED },
[I8_24] = { 8, 24, OPERAND_SIGNED },
[I8_32] = { 8, 32, OPERAND_SIGNED },
[I16_32] = { 16, 32, OPERAND_SIGNED },
[I16_16] = { 16, 16, OPERAND_SIGNED }, [I16_16] = { 16, 16, OPERAND_SIGNED },
[U16_16] = { 16, 16, 0 }, [U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 }, [U16_32] = { 16, 32, 0 },
...@@ -202,6 +233,9 @@ static const struct s390_operand operands[] = ...@@ -202,6 +233,9 @@ static const struct s390_operand operands[] =
[U32_16] = { 32, 16, 0 }, [U32_16] = { 32, 16, 0 },
[M_16] = { 4, 16, 0 }, [M_16] = { 4, 16, 0 },
[M_20] = { 4, 20, 0 }, [M_20] = { 4, 20, 0 },
[M_24] = { 4, 24, 0 },
[M_28] = { 4, 28, 0 },
[M_32] = { 4, 32, 0 },
[RO_28] = { 4, 28, OPERAND_GPR } [RO_28] = { 4, 28, OPERAND_GPR }
}; };
...@@ -283,6 +317,7 @@ static const unsigned char formats[][7] = { ...@@ -283,6 +317,7 @@ static const unsigned char formats[][7] = {
[INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
[INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
[INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
[INSTR_RXE_RRRDM] = { 0xff, R_8,D_20,X_12,B_16,M_32,0 },
[INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
[INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 }, [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
[INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 }, [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
...@@ -307,6 +342,37 @@ static const unsigned char formats[][7] = { ...@@ -307,6 +342,37 @@ static const unsigned char formats[][7] = {
[INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
[INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
[INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
[INSTR_VRI_V0IM] = { 0xff, V_8,I16_16,M_32,0,0,0 },
[INSTR_VRI_V0I0] = { 0xff, V_8,I16_16,0,0,0,0 },
[INSTR_VRI_V0IIM] = { 0xff, V_8,I8_16,I8_24,M_32,0,0 },
[INSTR_VRI_VVIM] = { 0xff, V_8,I16_16,V_12,M_32,0,0 },
[INSTR_VRI_VVV0IM]= { 0xff, V_8,V_12,V_16,I8_24,M_32,0 },
[INSTR_VRI_VVV0I0]= { 0xff, V_8,V_12,V_16,I8_24,0,0 },
[INSTR_VRI_VVIMM] = { 0xff, V_8,V_12,I16_16,M_32,M_28,0 },
[INSTR_VRR_VV00MMM]={ 0xff, V_8,V_12,M_32,M_28,M_24,0 },
[INSTR_VRR_VV000MM]={ 0xff, V_8,V_12,M_32,M_28,0,0 },
[INSTR_VRR_VV0000M]={ 0xff, V_8,V_12,M_32,0,0,0 },
[INSTR_VRR_VV00000]={ 0xff, V_8,V_12,0,0,0,0 },
[INSTR_VRR_VVV0M0M]={ 0xff, V_8,V_12,V_16,M_32,M_24,0 },
[INSTR_VRR_VV00M0M]={ 0xff, V_8,V_12,M_32,M_24,0,0 },
[INSTR_VRR_VVV000M]={ 0xff, V_8,V_12,V_16,M_32,0,0 },
[INSTR_VRR_VVV000V]={ 0xff, V_8,V_12,V_16,V_32,0,0 },
[INSTR_VRR_VVV0000]={ 0xff, V_8,V_12,V_16,0,0,0 },
[INSTR_VRR_VVV0MMM]={ 0xff, V_8,V_12,V_16,M_32,M_28,M_24 },
[INSTR_VRR_VVV00MM]={ 0xff, V_8,V_12,V_16,M_32,M_28,0 },
[INSTR_VRR_VVVMM0V]={ 0xff, V_8,V_12,V_16,V_32,M_20,M_24 },
[INSTR_VRR_VVVM0MV]={ 0xff, V_8,V_12,V_16,V_32,M_28,M_20 },
[INSTR_VRR_VVVM00V]={ 0xff, V_8,V_12,V_16,V_32,M_20,0 },
[INSTR_VRR_VRR0000]={ 0xff, V_8,R_12,R_16,0,0,0 },
[INSTR_VRS_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
[INSTR_VRS_VVRD0] = { 0xff, V_8,V_12,D_20,B_16,0,0 },
[INSTR_VRS_VRRDM] = { 0xff, V_8,R_12,D_20,B_16,M_32,0 },
[INSTR_VRS_VRRD0] = { 0xff, V_8,R_12,D_20,B_16,0,0 },
[INSTR_VRS_RVRDM] = { 0xff, R_8,V_12,D_20,B_16,M_32,0 },
[INSTR_VRV_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
[INSTR_VRV_VWRDM] = { 0xff, V_8,D_20,W_12,B_16,M_32,0 },
[INSTR_VRX_VRRDM] = { 0xff, V_8,D_20,X_12,B_16,M_32,0 },
[INSTR_VRX_VRRD0] = { 0xff, V_8,D_20,X_12,B_16,0,0 },
}; };
enum { enum {
...@@ -381,6 +447,11 @@ enum { ...@@ -381,6 +447,11 @@ enum {
LONG_INSN_MPCIFC, LONG_INSN_MPCIFC,
LONG_INSN_STPCIFC, LONG_INSN_STPCIFC,
LONG_INSN_PCISTB, LONG_INSN_PCISTB,
LONG_INSN_VPOPCT,
LONG_INSN_VERLLV,
LONG_INSN_VESRAV,
LONG_INSN_VESRLV,
LONG_INSN_VSBCBI
}; };
static char *long_insn_name[] = { static char *long_insn_name[] = {
...@@ -455,6 +526,11 @@ static char *long_insn_name[] = { ...@@ -455,6 +526,11 @@ static char *long_insn_name[] = {
[LONG_INSN_MPCIFC] = "mpcifc", [LONG_INSN_MPCIFC] = "mpcifc",
[LONG_INSN_STPCIFC] = "stpcifc", [LONG_INSN_STPCIFC] = "stpcifc",
[LONG_INSN_PCISTB] = "pcistb", [LONG_INSN_PCISTB] = "pcistb",
[LONG_INSN_VPOPCT] = "vpopct",
[LONG_INSN_VERLLV] = "verllv",
[LONG_INSN_VESRAV] = "vesrav",
[LONG_INSN_VESRLV] = "vesrlv",
[LONG_INSN_VSBCBI] = "vsbcbi",
}; };
static struct s390_insn opcode[] = { static struct s390_insn opcode[] = {
...@@ -1369,6 +1445,150 @@ static struct s390_insn opcode_e5[] = { ...@@ -1369,6 +1445,150 @@ static struct s390_insn opcode_e5[] = {
{ "", 0, INSTR_INVALID } { "", 0, INSTR_INVALID }
}; };
static struct s390_insn opcode_e7[] = {
#ifdef CONFIG_64BIT
{ "lcbb", 0x27, INSTR_RXE_RRRDM },
{ "vgef", 0x13, INSTR_VRV_VVRDM },
{ "vgeg", 0x12, INSTR_VRV_VVRDM },
{ "vgbm", 0x44, INSTR_VRI_V0I0 },
{ "vgm", 0x46, INSTR_VRI_V0IIM },
{ "vl", 0x06, INSTR_VRX_VRRD0 },
{ "vlr", 0x56, INSTR_VRR_VV00000 },
{ "vlrp", 0x05, INSTR_VRX_VRRDM },
{ "vleb", 0x00, INSTR_VRX_VRRDM },
{ "vleh", 0x01, INSTR_VRX_VRRDM },
{ "vlef", 0x03, INSTR_VRX_VRRDM },
{ "vleg", 0x02, INSTR_VRX_VRRDM },
{ "vleib", 0x40, INSTR_VRI_V0IM },
{ "vleih", 0x41, INSTR_VRI_V0IM },
{ "vleif", 0x43, INSTR_VRI_V0IM },
{ "vleig", 0x42, INSTR_VRI_V0IM },
{ "vlgv", 0x21, INSTR_VRS_RVRDM },
{ "vllez", 0x04, INSTR_VRX_VRRDM },
{ "vlm", 0x36, INSTR_VRS_VVRD0 },
{ "vlbb", 0x07, INSTR_VRX_VRRDM },
{ "vlvg", 0x22, INSTR_VRS_VRRDM },
{ "vlvgp", 0x62, INSTR_VRR_VRR0000 },
{ "vll", 0x37, INSTR_VRS_VRRD0 },
{ "vmrh", 0x61, INSTR_VRR_VVV000M },
{ "vmrl", 0x60, INSTR_VRR_VVV000M },
{ "vpk", 0x94, INSTR_VRR_VVV000M },
{ "vpks", 0x97, INSTR_VRR_VVV0M0M },
{ "vpkls", 0x95, INSTR_VRR_VVV0M0M },
{ "vperm", 0x8c, INSTR_VRR_VVV000V },
{ "vpdi", 0x84, INSTR_VRR_VVV000M },
{ "vrep", 0x4d, INSTR_VRI_VVIM },
{ "vrepi", 0x45, INSTR_VRI_V0IM },
{ "vscef", 0x1b, INSTR_VRV_VWRDM },
{ "vsceg", 0x1a, INSTR_VRV_VWRDM },
{ "vsel", 0x8d, INSTR_VRR_VVV000V },
{ "vseg", 0x5f, INSTR_VRR_VV0000M },
{ "vst", 0x0e, INSTR_VRX_VRRD0 },
{ "vsteb", 0x08, INSTR_VRX_VRRDM },
{ "vsteh", 0x09, INSTR_VRX_VRRDM },
{ "vstef", 0x0b, INSTR_VRX_VRRDM },
{ "vsteg", 0x0a, INSTR_VRX_VRRDM },
{ "vstm", 0x3e, INSTR_VRS_VVRD0 },
{ "vstl", 0x3f, INSTR_VRS_VRRD0 },
{ "vuph", 0xd7, INSTR_VRR_VV0000M },
{ "vuplh", 0xd5, INSTR_VRR_VV0000M },
{ "vupl", 0xd6, INSTR_VRR_VV0000M },
{ "vupll", 0xd4, INSTR_VRR_VV0000M },
{ "va", 0xf3, INSTR_VRR_VVV000M },
{ "vacc", 0xf1, INSTR_VRR_VVV000M },
{ "vac", 0xbb, INSTR_VRR_VVVM00V },
{ "vaccc", 0xb9, INSTR_VRR_VVVM00V },
{ "vn", 0x68, INSTR_VRR_VVV0000 },
{ "vnc", 0x69, INSTR_VRR_VVV0000 },
{ "vavg", 0xf2, INSTR_VRR_VVV000M },
{ "vavgl", 0xf0, INSTR_VRR_VVV000M },
{ "vcksm", 0x66, INSTR_VRR_VVV0000 },
{ "vec", 0xdb, INSTR_VRR_VV0000M },
{ "vecl", 0xd9, INSTR_VRR_VV0000M },
{ "vceq", 0xf8, INSTR_VRR_VVV0M0M },
{ "vch", 0xfb, INSTR_VRR_VVV0M0M },
{ "vchl", 0xf9, INSTR_VRR_VVV0M0M },
{ "vclz", 0x53, INSTR_VRR_VV0000M },
{ "vctz", 0x52, INSTR_VRR_VV0000M },
{ "vx", 0x6d, INSTR_VRR_VVV0000 },
{ "vgfm", 0xb4, INSTR_VRR_VVV000M },
{ "vgfma", 0xbc, INSTR_VRR_VVVM00V },
{ "vlc", 0xde, INSTR_VRR_VV0000M },
{ "vlp", 0xdf, INSTR_VRR_VV0000M },
{ "vmx", 0xff, INSTR_VRR_VVV000M },
{ "vmxl", 0xfd, INSTR_VRR_VVV000M },
{ "vmn", 0xfe, INSTR_VRR_VVV000M },
{ "vmnl", 0xfc, INSTR_VRR_VVV000M },
{ "vmal", 0xaa, INSTR_VRR_VVVM00V },
{ "vmae", 0xae, INSTR_VRR_VVVM00V },
{ "vmale", 0xac, INSTR_VRR_VVVM00V },
{ "vmah", 0xab, INSTR_VRR_VVVM00V },
{ "vmalh", 0xa9, INSTR_VRR_VVVM00V },
{ "vmao", 0xaf, INSTR_VRR_VVVM00V },
{ "vmalo", 0xad, INSTR_VRR_VVVM00V },
{ "vmh", 0xa3, INSTR_VRR_VVV000M },
{ "vmlh", 0xa1, INSTR_VRR_VVV000M },
{ "vml", 0xa2, INSTR_VRR_VVV000M },
{ "vme", 0xa6, INSTR_VRR_VVV000M },
{ "vmle", 0xa4, INSTR_VRR_VVV000M },
{ "vmo", 0xa7, INSTR_VRR_VVV000M },
{ "vmlo", 0xa5, INSTR_VRR_VVV000M },
{ "vno", 0x6b, INSTR_VRR_VVV0000 },
{ "vo", 0x6a, INSTR_VRR_VVV0000 },
{ { 0, LONG_INSN_VPOPCT }, 0x50, INSTR_VRR_VV0000M },
{ { 0, LONG_INSN_VERLLV }, 0x73, INSTR_VRR_VVV000M },
{ "verll", 0x33, INSTR_VRS_VVRDM },
{ "verim", 0x72, INSTR_VRI_VVV0IM },
{ "veslv", 0x70, INSTR_VRR_VVV000M },
{ "vesl", 0x30, INSTR_VRS_VVRDM },
{ { 0, LONG_INSN_VESRAV }, 0x7a, INSTR_VRR_VVV000M },
{ "vesra", 0x3a, INSTR_VRS_VVRDM },
{ { 0, LONG_INSN_VESRLV }, 0x78, INSTR_VRR_VVV000M },
{ "vesrl", 0x38, INSTR_VRS_VVRDM },
{ "vsl", 0x74, INSTR_VRR_VVV0000 },
{ "vslb", 0x75, INSTR_VRR_VVV0000 },
{ "vsldb", 0x77, INSTR_VRI_VVV0I0 },
{ "vsra", 0x7e, INSTR_VRR_VVV0000 },
{ "vsrab", 0x7f, INSTR_VRR_VVV0000 },
{ "vsrl", 0x7c, INSTR_VRR_VVV0000 },
{ "vsrlb", 0x7d, INSTR_VRR_VVV0000 },
{ "vs", 0xf7, INSTR_VRR_VVV000M },
{ "vscb", 0xf5, INSTR_VRR_VVV000M },
{ "vsb", 0xbf, INSTR_VRR_VVVM00V },
{ { 0, LONG_INSN_VSBCBI }, 0xbd, INSTR_VRR_VVVM00V },
{ "vsumg", 0x65, INSTR_VRR_VVV000M },
{ "vsumq", 0x67, INSTR_VRR_VVV000M },
{ "vsum", 0x64, INSTR_VRR_VVV000M },
{ "vtm", 0xd8, INSTR_VRR_VV00000 },
{ "vfae", 0x82, INSTR_VRR_VVV0M0M },
{ "vfee", 0x80, INSTR_VRR_VVV0M0M },
{ "vfene", 0x81, INSTR_VRR_VVV0M0M },
{ "vistr", 0x5c, INSTR_VRR_VV00M0M },
{ "vstrc", 0x8a, INSTR_VRR_VVVMM0V },
{ "vfa", 0xe3, INSTR_VRR_VVV00MM },
{ "wfc", 0xcb, INSTR_VRR_VV000MM },
{ "wfk", 0xca, INSTR_VRR_VV000MM },
{ "vfce", 0xe8, INSTR_VRR_VVV0MMM },
{ "vfch", 0xeb, INSTR_VRR_VVV0MMM },
{ "vfche", 0xea, INSTR_VRR_VVV0MMM },
{ "vcdg", 0xc3, INSTR_VRR_VV00MMM },
{ "vcdlg", 0xc1, INSTR_VRR_VV00MMM },
{ "vcgd", 0xc2, INSTR_VRR_VV00MMM },
{ "vclgd", 0xc0, INSTR_VRR_VV00MMM },
{ "vfd", 0xe5, INSTR_VRR_VVV00MM },
{ "vfi", 0xc7, INSTR_VRR_VV00MMM },
{ "vlde", 0xc4, INSTR_VRR_VV000MM },
{ "vled", 0xc5, INSTR_VRR_VV00MMM },
{ "vfm", 0xe7, INSTR_VRR_VVV00MM },
{ "vfma", 0x8f, INSTR_VRR_VVVM0MV },
{ "vfms", 0x8e, INSTR_VRR_VVVM0MV },
{ "vfpso", 0xcc, INSTR_VRR_VV00MMM },
{ "vfsq", 0xce, INSTR_VRR_VV000MM },
{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
{ "vftci", 0x4a, INSTR_VRI_VVIMM },
#endif
};
static struct s390_insn opcode_eb[] = { static struct s390_insn opcode_eb[] = {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
{ "lmg", 0x04, INSTR_RSY_RRRD }, { "lmg", 0x04, INSTR_RSY_RRRD },
...@@ -1552,16 +1772,17 @@ static struct s390_insn opcode_ed[] = { ...@@ -1552,16 +1772,17 @@ static struct s390_insn opcode_ed[] = {
static unsigned int extract_operand(unsigned char *code, static unsigned int extract_operand(unsigned char *code,
const struct s390_operand *operand) const struct s390_operand *operand)
{ {
unsigned char *cp;
unsigned int val; unsigned int val;
int bits; int bits;
/* Extract fragments of the operand byte for byte. */ /* Extract fragments of the operand byte for byte. */
code += operand->shift / 8; cp = code + operand->shift / 8;
bits = (operand->shift & 7) + operand->bits; bits = (operand->shift & 7) + operand->bits;
val = 0; val = 0;
do { do {
val <<= 8; val <<= 8;
val |= (unsigned int) *code++; val |= (unsigned int) *cp++;
bits -= 8; bits -= 8;
} while (bits > 0); } while (bits > 0);
val >>= -bits; val >>= -bits;
...@@ -1571,6 +1792,18 @@ static unsigned int extract_operand(unsigned char *code, ...@@ -1571,6 +1792,18 @@ static unsigned int extract_operand(unsigned char *code,
if (operand->bits == 20 && operand->shift == 20) if (operand->bits == 20 && operand->shift == 20)
val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
/* Check for register extensions bits for vector registers. */
if (operand->flags & OPERAND_VR) {
if (operand->shift == 8)
val |= (code[4] & 8) << 1;
else if (operand->shift == 12)
val |= (code[4] & 4) << 2;
else if (operand->shift == 16)
val |= (code[4] & 2) << 3;
else if (operand->shift == 32)
val |= (code[4] & 1) << 4;
}
/* Sign extend value if the operand is signed or pc relative. */ /* Sign extend value if the operand is signed or pc relative. */
if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
(val & (1U << (operand->bits - 1)))) (val & (1U << (operand->bits - 1))))
...@@ -1639,6 +1872,10 @@ struct s390_insn *find_insn(unsigned char *code) ...@@ -1639,6 +1872,10 @@ struct s390_insn *find_insn(unsigned char *code)
case 0xe5: case 0xe5:
table = opcode_e5; table = opcode_e5;
break; break;
case 0xe7:
table = opcode_e7;
opfrag = code[5];
break;
case 0xeb: case 0xeb:
table = opcode_eb; table = opcode_eb;
opfrag = code[5]; opfrag = code[5];
...@@ -1734,6 +1971,8 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) ...@@ -1734,6 +1971,8 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr += sprintf(ptr, "%%a%i", value); ptr += sprintf(ptr, "%%a%i", value);
else if (operand->flags & OPERAND_CR) else if (operand->flags & OPERAND_CR)
ptr += sprintf(ptr, "%%c%i", value); ptr += sprintf(ptr, "%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
else if (operand->flags & OPERAND_PCREL) else if (operand->flags & OPERAND_PCREL)
ptr += sprintf(ptr, "%lx", (signed int) value ptr += sprintf(ptr, "%lx", (signed int) value
+ addr); + addr);
......
...@@ -390,10 +390,10 @@ static __init void detect_machine_facilities(void) ...@@ -390,10 +390,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
if (test_facility(50) && test_facility(73)) if (test_facility(50) && test_facility(73))
S390_lowcore.machine_flags |= MACHINE_FLAG_TE; S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
if (test_facility(66))
S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
if (test_facility(51)) if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
#endif #endif
} }
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/cputime.h> #include <asm/idle.h>
extern void *restart_stack; extern void *restart_stack;
extern unsigned long suspend_zero_pages; extern unsigned long suspend_zero_pages;
...@@ -21,6 +21,8 @@ void psw_idle(struct s390_idle_data *, unsigned long); ...@@ -21,6 +21,8 @@ void psw_idle(struct s390_idle_data *, unsigned long);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
int alloc_vector_registers(struct task_struct *tsk);
void do_protection_exception(struct pt_regs *regs); void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs); void do_dat_exception(struct pt_regs *regs);
...@@ -43,8 +45,10 @@ void special_op_exception(struct pt_regs *regs); ...@@ -43,8 +45,10 @@ void special_op_exception(struct pt_regs *regs);
void specification_exception(struct pt_regs *regs); void specification_exception(struct pt_regs *regs);
void transaction_exception(struct pt_regs *regs); void transaction_exception(struct pt_regs *regs);
void translation_exception(struct pt_regs *regs); void translation_exception(struct pt_regs *regs);
void vector_exception(struct pt_regs *regs);
void do_per_trap(struct pt_regs *regs); void do_per_trap(struct pt_regs *regs);
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
void syscall_trace(struct pt_regs *regs, int entryexit); void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs); void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs); void do_signal(struct pt_regs *regs);
......
...@@ -42,7 +42,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER ...@@ -42,7 +42,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT) _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
...@@ -265,6 +266,10 @@ sysc_work: ...@@ -265,6 +266,10 @@ sysc_work:
jo sysc_mcck_pending jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule jo sysc_reschedule
#ifdef CONFIG_UPROBES
tm __TI_flags+7(%r12),_TIF_UPROBE
jo sysc_uprobe_notify
#endif
tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
jo sysc_singlestep jo sysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING tm __TI_flags+7(%r12),_TIF_SIGPENDING
...@@ -322,6 +327,16 @@ sysc_notify_resume: ...@@ -322,6 +327,16 @@ sysc_notify_resume:
larl %r14,sysc_return larl %r14,sysc_return
jg do_notify_resume jg do_notify_resume
#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
sysc_uprobe_notify:
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return
jg uprobe_notify_resume
#endif
# #
# _PIF_PER_TRAP is set, call do_per_trap # _PIF_PER_TRAP is set, call do_per_trap
# #
......
/* /*
* Dynamic function tracer architecture backend. * Dynamic function tracer architecture backend.
* *
* Copyright IBM Corp. 2009 * Copyright IBM Corp. 2009,2014
* *
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com>
...@@ -17,100 +17,76 @@ ...@@ -17,100 +17,76 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include "entry.h" #include "entry.h"
#ifdef CONFIG_DYNAMIC_FTRACE void mcount_replace_code(void);
void ftrace_disable_code(void); void ftrace_disable_code(void);
void ftrace_enable_insn(void); void ftrace_enable_insn(void);
#ifdef CONFIG_64BIT
/* /*
* The 64-bit mcount code looks like this: * The mcount code looks like this:
* stg %r14,8(%r15) # offset 0 * stg %r14,8(%r15) # offset 0
* > larl %r1,<&counter> # offset 6 * larl %r1,<&counter> # offset 6
* > brasl %r14,_mcount # offset 12 * brasl %r14,_mcount # offset 12
* lg %r14,8(%r15) # offset 18 * lg %r14,8(%r15) # offset 18
* Total length is 24 bytes. The middle two instructions of the mcount * Total length is 24 bytes. The complete mcount block initially gets replaced
* block get overwritten by ftrace_make_nop / ftrace_make_call. * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
* The 64-bit enabled ftrace code block looks like this: * only patch the jg/lg instruction within the block.
* stg %r14,8(%r15) # offset 0 * Note: we do not patch the first instruction to an unconditional branch,
* since that would break kprobes/jprobes. It is easier to leave the larl
* instruction in and only modify the second instruction.
* The enabled ftrace code block looks like this:
* larl %r0,.+24 # offset 0
* > lg %r1,__LC_FTRACE_FUNC # offset 6 * > lg %r1,__LC_FTRACE_FUNC # offset 6
* > lgr %r0,%r0 # offset 12 * br %r1 # offset 12
* > basr %r14,%r1 # offset 16 * brcl 0,0 # offset 14
* lg %r14,8(%15) # offset 18 * brc 0,0 # offset 20
* The return points of the mcount/ftrace function have the same offset 18. * The ftrace function gets called with a non-standard C function call ABI
* The 64-bit disable ftrace code block looks like this: * where r0 contains the return address. It is also expected that the called
* stg %r14,8(%r15) # offset 0 * function only clobbers r0 and r1, but restores r2-r15.
* The return point of the ftrace function has offset 24, so execution
* continues behind the mcount block.
* larl %r0,.+24 # offset 0
* > jg .+18 # offset 6 * > jg .+18 # offset 6
* > lgr %r0,%r0 # offset 12 * br %r1 # offset 12
* > basr %r14,%r1 # offset 16 * brcl 0,0 # offset 14
* lg %r14,8(%15) # offset 18 * brc 0,0 # offset 20
* The jg instruction branches to offset 24 to skip as many instructions * The jg instruction branches to offset 24 to skip as many instructions
* as possible. * as possible.
*/ */
asm( asm(
" .align 4\n" " .align 4\n"
"mcount_replace_code:\n"
" larl %r0,0f\n"
"ftrace_disable_code:\n" "ftrace_disable_code:\n"
" jg 0f\n" " jg 0f\n"
" lgr %r0,%r0\n" " br %r1\n"
" basr %r14,%r1\n" " brcl 0,0\n"
" brc 0,0\n"
"0:\n" "0:\n"
" .align 4\n" " .align 4\n"
"ftrace_enable_insn:\n" "ftrace_enable_insn:\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"); " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
#define MCOUNT_BLOCK_SIZE 24
#define MCOUNT_INSN_OFFSET 6
#define FTRACE_INSN_SIZE 6 #define FTRACE_INSN_SIZE 6
#else /* CONFIG_64BIT */ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
/* unsigned long addr)
* The 31-bit mcount code looks like this: {
* st %r14,4(%r15) # offset 0 return 0;
* > bras %r1,0f # offset 4 }
* > .long _mcount # offset 8
* > .long <&counter> # offset 12
* > 0: l %r14,0(%r1) # offset 16
* > l %r1,4(%r1) # offset 20
* basr %r14,%r14 # offset 24
* l %r14,4(%r15) # offset 26
* Total length is 30 bytes. The twenty bytes starting from offset 4
* to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
* The 31-bit enabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > l %r14,__LC_FTRACE_FUNC # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The return points of the mcount/ftrace function have the same offset 26.
* The 31-bit disabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > j .+26 # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The j instruction branches to offset 30 to skip as many instructions
* as possible.
*/
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 1f\n"
" j 0f\n"
" .fill 12,1,0x07\n"
"0: basr %r14,%r14\n"
"1:\n"
" .align 4\n"
"ftrace_enable_insn:\n"
" l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
#define FTRACE_INSN_SIZE 4
#endif /* CONFIG_64BIT */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr) unsigned long addr)
{ {
/* Initial replacement of the whole mcount block */
if (addr == MCOUNT_ADDR) {
if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
mcount_replace_code,
MCOUNT_BLOCK_SIZE))
return -EPERM;
return 0;
}
if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
MCOUNT_INSN_SIZE)) MCOUNT_INSN_SIZE))
return -EPERM; return -EPERM;
...@@ -135,8 +111,6 @@ int __init ftrace_dyn_arch_init(void) ...@@ -135,8 +111,6 @@ int __init ftrace_dyn_arch_init(void)
return 0; return 0;
} }
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* /*
* Hook the return address and push it in the stack of return addresses * Hook the return address and push it in the stack of return addresses
...@@ -162,31 +136,26 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent, ...@@ -162,31 +136,26 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
return parent; return parent;
} }
#ifdef CONFIG_DYNAMIC_FTRACE
/* /*
* Patch the kernel code at ftrace_graph_caller location. The instruction * Patch the kernel code at ftrace_graph_caller location. The instruction
* there is branch relative and save to prepare_ftrace_return. To disable * there is branch relative on condition. To enable the ftrace graph code
* the call to prepare_ftrace_return we patch the bras offset to point * block, we simply patch the mask field of the instruction to zero and
* directly after the instructions. To enable the call we calculate * turn the instruction into a nop.
* the original offset to prepare_ftrace_return and put it back. * To disable the ftrace graph code the mask field will be patched to
* all ones, which turns the instruction into an unconditional branch.
*/ */
int ftrace_enable_ftrace_graph_caller(void) int ftrace_enable_ftrace_graph_caller(void)
{ {
unsigned short offset; u8 op = 0x04; /* set mask field to zero */
offset = ((void *) prepare_ftrace_return - return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
(void *) ftrace_graph_caller) / 2;
return probe_kernel_write((void *) ftrace_graph_caller + 2,
&offset, sizeof(offset));
} }
int ftrace_disable_ftrace_graph_caller(void) int ftrace_disable_ftrace_graph_caller(void)
{ {
static unsigned short offset = 0x0002; u8 op = 0xf4; /* set mask field to all ones */
return probe_kernel_write((void *) ftrace_graph_caller + 2, return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
&offset, sizeof(offset));
} }
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
...@@ -398,7 +398,7 @@ ENTRY(startup_kdump) ...@@ -398,7 +398,7 @@ ENTRY(startup_kdump)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
#ifndef CONFIG_MARCH_G5 #ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
tm __LC_STFL_FAC_LIST,0x01 # stfle available ? tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f jz 0f
la %r0,1 la %r0,1
......
/*
* Idle functions for s390.
*
* Copyright IBM Corp. 2014
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
void __kprobes enabled_wait(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
unsigned long long idle_time;
unsigned long psw_mask;
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
/* Account time spent with enabled wait psw loaded as idle time. */
idle->sequence++;
smp_wmb();
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(idle_time);
smp_wmb();
idle->sequence++;
}
static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long idle_count;
unsigned int sequence;
do {
sequence = ACCESS_ONCE(idle->sequence);
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long now, idle_time, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
cputime64_t arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
void arch_cpu_idle_enter(void)
{
local_mcck_disable();
}
void arch_cpu_idle(void)
{
if (!test_cpu_flag(CIF_MCCK_PENDING))
/* Halt the cpu and keep track of cpu time accounting. */
enabled_wait();
local_irq_enable();
}
void arch_cpu_idle_exit(void)
{
local_mcck_enable();
if (test_cpu_flag(CIF_MCCK_PENDING))
s390_handle_mcck();
}
void arch_cpu_idle_dead(void)
{
cpu_die();
}
...@@ -70,6 +70,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { ...@@ -70,6 +70,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"}, {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
...@@ -258,7 +259,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy) ...@@ -258,7 +259,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
ext_code = *(struct ext_code *) &regs->int_code; ext_code = *(struct ext_code *) &regs->int_code;
if (ext_code.code != EXT_IRQ_CLK_COMP) if (ext_code.code != EXT_IRQ_CLK_COMP)
__get_cpu_var(s390_idle).nohz_delay = 1; set_cpu_flag(CIF_NOHZ_DELAY);
index = ext_hash(ext_code.code); index = ext_hash(ext_code.code);
rcu_read_lock(); rcu_read_lock();
......
...@@ -58,161 +58,13 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = { ...@@ -58,161 +58,13 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
.insn_size = MAX_INSN_SIZE, .insn_size = MAX_INSN_SIZE,
}; };
static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
if (!is_known_insn((unsigned char *)insn))
return -EINVAL;
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
case 0x83: /* diag */
case 0x44: /* ex */
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x00: /* exrl */
return -EINVAL;
}
}
switch (insn[0]) {
case 0x0101: /* pr */
case 0xb25a: /* bsa */
case 0xb240: /* bakr */
case 0xb258: /* bsg */
case 0xb218: /* pc */
case 0xb228: /* pt */
case 0xb98d: /* epsw */
return -EINVAL;
}
return 0;
}
static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
{
/* default fixup method */
int fixup = FIXUP_PSW_NORMAL;
switch (insn[0] >> 8) {
case 0x05: /* balr */
case 0x0d: /* basr */
fixup = FIXUP_RETURN_REGISTER;
/* if r2 = 0, no branch will be taken */
if ((insn[0] & 0x0f) == 0)
fixup |= FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x06: /* bctr */
case 0x07: /* bcr */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x45: /* bal */
case 0x4d: /* bas */
fixup = FIXUP_RETURN_REGISTER;
break;
case 0x47: /* bc */
case 0x46: /* bct */
case 0x86: /* bxh */
case 0x87: /* bxle */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x82: /* lpsw */
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xb2: /* lpswe */
if ((insn[0] & 0xff) == 0xb2)
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xa7: /* bras */
if ((insn[0] & 0x0f) == 0x05)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xc0:
if ((insn[0] & 0x0f) == 0x05) /* brasl */
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
switch (insn[2] & 0xff) {
case 0x44: /* bxhg */
case 0x45: /* bxleg */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
case 0xe3: /* bctg */
if ((insn[2] & 0xff) == 0x46)
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0xec:
switch (insn[2] & 0xff) {
case 0xe5: /* clgrb */
case 0xe6: /* cgrb */
case 0xf6: /* crb */
case 0xf7: /* clrb */
case 0xfc: /* cgib */
case 0xfd: /* cglib */
case 0xfe: /* cib */
case 0xff: /* clib */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
}
return fixup;
}
static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
{
/* Check if we have a RIL-b or RIL-c format instruction which
* we need to modify in order to avoid instruction emulation. */
switch (insn[0] >> 8) {
case 0xc0:
if ((insn[0] & 0x0f) == 0x00) /* larl */
return true;
break;
case 0xc4:
switch (insn[0] & 0x0f) {
case 0x02: /* llhrl */
case 0x04: /* lghrl */
case 0x05: /* lhrl */
case 0x06: /* llghrl */
case 0x07: /* sthrl */
case 0x08: /* lgrl */
case 0x0b: /* stgrl */
case 0x0c: /* lgfrl */
case 0x0d: /* lrl */
case 0x0e: /* llgfrl */
case 0x0f: /* strl */
return true;
}
break;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
case 0x06: /* clghrl */
case 0x07: /* clhrl */
case 0x08: /* cgrl */
case 0x0a: /* clgrl */
case 0x0c: /* cgfrl */
case 0x0d: /* crl */
case 0x0e: /* clgfrl */
case 0x0f: /* clrl */
return true;
}
break;
}
return false;
}
static void __kprobes copy_instruction(struct kprobe *p) static void __kprobes copy_instruction(struct kprobe *p)
{ {
s64 disp, new_disp; s64 disp, new_disp;
u64 addr, new_addr; u64 addr, new_addr;
memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
if (!is_insn_relative_long(p->ainsn.insn)) if (!probe_is_insn_relative_long(p->ainsn.insn))
return; return;
/* /*
* For pc-relative instructions in RIL-b or RIL-c format patch the * For pc-relative instructions in RIL-b or RIL-c format patch the
...@@ -276,7 +128,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) ...@@ -276,7 +128,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if ((unsigned long) p->addr & 0x01) if ((unsigned long) p->addr & 0x01)
return -EINVAL; return -EINVAL;
/* Make sure the probe isn't going on a difficult instruction */ /* Make sure the probe isn't going on a difficult instruction */
if (is_prohibited_opcode(p->addr)) if (probe_is_prohibited_opcode(p->addr))
return -EINVAL; return -EINVAL;
if (s390_get_insn_slot(p)) if (s390_get_insn_slot(p))
return -ENOMEM; return -ENOMEM;
...@@ -605,7 +457,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) ...@@ -605,7 +457,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{ {
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
int fixup = get_fixup_type(p->ainsn.insn); int fixup = probe_get_fixup_type(p->ainsn.insn);
if (fixup & FIXUP_PSW_NORMAL) if (fixup & FIXUP_PSW_NORMAL)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
...@@ -789,11 +641,6 @@ void __kprobes jprobe_return(void) ...@@ -789,11 +641,6 @@ void __kprobes jprobe_return(void)
asm volatile(".word 0x0002"); asm volatile(".word 0x0002");
} }
static void __used __kprobes jprobe_return_end(void)
{
asm volatile("bcr 0,0");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{ {
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <asm/elf.h> #include <asm/elf.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/os_info.h> #include <asm/os_info.h>
#include <asm/switch_to.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
...@@ -43,7 +44,7 @@ static void add_elf_notes(int cpu) ...@@ -43,7 +44,7 @@ static void add_elf_notes(int cpu)
memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa)); memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
ptr = fill_cpu_elf_notes(ptr, sa); ptr = fill_cpu_elf_notes(ptr, sa, NULL);
memset(ptr, 0, sizeof(struct elf_note)); memset(ptr, 0, sizeof(struct elf_note));
} }
...@@ -53,8 +54,11 @@ static void add_elf_notes(int cpu) ...@@ -53,8 +54,11 @@ static void add_elf_notes(int cpu)
static void setup_regs(void) static void setup_regs(void)
{ {
unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
struct _lowcore *lc;
int cpu, this_cpu; int cpu, this_cpu;
/* Get lowcore pointer from store status of this CPU (absolute zero) */
lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
this_cpu = smp_find_processor_id(stap()); this_cpu = smp_find_processor_id(stap());
add_elf_notes(this_cpu); add_elf_notes(this_cpu);
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
...@@ -64,6 +68,8 @@ static void setup_regs(void) ...@@ -64,6 +68,8 @@ static void setup_regs(void)
continue; continue;
add_elf_notes(cpu); add_elf_notes(cpu);
} }
if (MACHINE_HAS_VX)
save_vx_regs_safe((void *) lc->vector_save_area_addr);
/* Copy dump CPU store status info to absolute zero */ /* Copy dump CPU store status info to absolute zero */
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
} }
......
...@@ -8,62 +8,72 @@ ...@@ -8,62 +8,72 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/ftrace.h> #include <asm/ftrace.h>
#include <asm/ptrace.h>
.section .kprobes.text, "ax" .section .kprobes.text, "ax"
ENTRY(ftrace_stub) ENTRY(ftrace_stub)
br %r14 br %r14
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
ENTRY(_mcount) ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14 br %r14
ENTRY(ftrace_caller) ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_trace_function
#else
lgr %r2,%r0
aghi %r2,-MCOUNT_INSN_SIZE
larl %r4,function_trace_op
lg %r4,0(%r4)
larl %r1,ftrace_trace_function
lg %r1,0(%r1)
#endif #endif
stm %r2,%r5,16(%r15) lgr %r3,%r14
bras %r1,1f la %r5,STACK_PTREGS(%r15)
0: .long ftrace_trace_function basr %r14,%r1
1: st %r14,56(%r15)
lr %r0,%r15
ahi %r15,-96
l %r3,100(%r15)
la %r2,0(%r14)
st %r0,__SF_BACKCHAIN(%r15)
la %r3,0(%r3)
ahi %r2,-MCOUNT_INSN_SIZE
l %r14,0b-0b(%r1)
l %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
l %r2,100(%r15) # The j instruction gets runtime patched to a nop instruction.
l %r3,152(%r15) # See ftrace_enable_ftrace_graph_caller.
ENTRY(ftrace_graph_caller) ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return. j ftrace_graph_caller_end
# See ftrace_enable_ftrace_graph_caller. The patched instruction is: lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
# bras %r14,prepare_ftrace_return lg %r3,(STACK_PTREGS_PSW+8)(%r15)
bras %r14,0f brasl %r14,prepare_ftrace_return
0: st %r2,100(%r15) stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
ftrace_graph_caller_end:
.globl ftrace_graph_caller_end
#endif
-	ahi	%r15,96
-	l	%r14,56(%r15)
-	lm	%r2,%r5,16(%r15)
-	br	%r14
+	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
+	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+	br	%r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(return_to_handler)
-	stm	%r2,%r5,16(%r15)
-	st	%r14,56(%r15)
-	lr	%r0,%r15
-	ahi	%r15,-96
-	st	%r0,__SF_BACKCHAIN(%r15)
-	bras	%r1,0f
-	.long	ftrace_return_to_handler
-0:	l	%r2,0b-0b(%r1)
+	stmg	%r2,%r5,32(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	brasl	%r14,ftrace_return_to_handler
+	aghi	%r15,STACK_FRAME_OVERHEAD
+	lgr	%r14,%r2
+	lmg	%r2,%r5,32(%r15)
basr %r14,%r2
lr %r14,%r2
ahi %r15,96
lm %r2,%r5,16(%r15)
	br	%r14
#endif
/*
* Copyright IBM Corp. 2008, 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
.section .kprobes.text, "ax"
ENTRY(ftrace_stub)
br %r14
ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
ENTRY(ftrace_caller)
#endif
stmg %r2,%r5,32(%r15)
stg %r14,112(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14
lg %r3,168(%r15)
aghi %r2,-MCOUNT_INSN_SIZE
larl %r14,ftrace_trace_function
lg %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
lg %r2,168(%r15)
lg %r3,272(%r15)
ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
# bras %r14,prepare_ftrace_return
bras %r14,0f
0: stg %r2,168(%r15)
#endif
aghi %r15,160
lmg %r2,%r5,32(%r15)
lg %r14,112(%r15)
br %r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(return_to_handler)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
brasl %r14,ftrace_return_to_handler
aghi %r15,160
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
#endif
@@ -20,6 +20,7 @@
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
+#include <asm/switch_to.h>
struct mcck_struct {
	int kill_task;
@@ -163,6 +164,21 @@ static int notrace s390_revalidate_registers(struct mci *mci)
		"	ld	15,120(%0)\n"
		: : "a" (fpt_save_area));
	}
#ifdef CONFIG_64BIT
/* Revalidate vector registers */
if (MACHINE_HAS_VX && current->thread.vxrs) {
if (!mci->vr) {
/*
* Vector registers can't be restored and therefore
* the process needs to be terminated.
*/
kill_task = 1;
}
restore_vx_regs((__vector128 *)
S390_lowcore.vector_save_area_addr);
}
#endif
	/* Revalidate access registers */
	asm volatile(
		"	lam	0,15,0(%0)"
......
@@ -49,7 +49,7 @@ PGM_CHECK_DEFAULT /* 17 */
PGM_CHECK_64BIT(transaction_exception)	/* 18 */
PGM_CHECK_DEFAULT			/* 19 */
PGM_CHECK_DEFAULT			/* 1a */
-PGM_CHECK_DEFAULT			/* 1b */
+PGM_CHECK_64BIT(vector_exception)	/* 1b */
PGM_CHECK(space_switch_exception)	/* 1c */
PGM_CHECK(hfp_sqrt_exception)		/* 1d */
PGM_CHECK_DEFAULT			/* 1e */
......
@@ -61,30 +61,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
	return sf->gprs[8];
}
void arch_cpu_idle(void)
{
local_mcck_disable();
if (test_cpu_flag(CIF_MCCK_PENDING)) {
local_mcck_enable();
local_irq_enable();
return;
}
/* Halt the cpu and keep track of cpu time accounting. */
vtime_stop_cpu();
local_irq_enable();
}
void arch_cpu_idle_exit(void)
{
if (test_cpu_flag(CIF_MCCK_PENDING))
s390_handle_mcck();
}
void arch_cpu_idle_dead(void)
{
cpu_die();
}
extern void __kprobes kernel_thread_starter(void);
/*
......
@@ -23,7 +23,6 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
 */
void cpu_init(void)
{
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct cpuid *id = &__get_cpu_var(cpu_id);
	get_cpu_id(id);
@@ -31,7 +30,6 @@ void cpu_init(void)
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
-	memset(idle, 0, sizeof(*idle));
}
/*
@@ -41,7 +39,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
	static const char *hwcap_str[] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-		"edat", "etf3eh", "highgprs", "te"
+		"edat", "etf3eh", "highgprs", "te", "vx"
	};
	unsigned long n = (unsigned long) v - 1;
	int i;
......
@@ -38,15 +38,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum s390_regset {
REGSET_GENERAL,
REGSET_FP,
REGSET_LAST_BREAK,
REGSET_TDB,
REGSET_SYSTEM_CALL,
REGSET_GENERAL_EXTENDED,
};
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
@@ -55,27 +46,39 @@ void update_cr_regs(struct task_struct *task)
#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution. */
-	if (MACHINE_HAS_TE) {
+	if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
		unsigned long cr, cr_new;
		__ctl_store(cr, 0, 0);
-		/* Set or clear transaction execution TXC bit 8. */
-		cr_new = cr | (1UL << 55);
-		if (task->thread.per_flags & PER_FLAG_NO_TE)
-			cr_new &= ~(1UL << 55);
+		cr_new = cr;
+		if (MACHINE_HAS_TE) {
+			/* Set or clear transaction execution TXC bit 8. */
+			cr_new |= (1UL << 55);
+			if (task->thread.per_flags & PER_FLAG_NO_TE)
+				cr_new &= ~(1UL << 55);
+		}
+		if (MACHINE_HAS_VX) {
+			/* Enable/disable of vector extension */
+			cr_new &= ~(1UL << 17);
+			if (task->thread.vxrs)
+				cr_new |= (1UL << 17);
+		}
		if (cr_new != cr)
			__ctl_load(cr_new, 0, 0);
-		/* Set or clear transaction execution TDC bits 62 and 63. */
-		__ctl_store(cr, 2, 2);
-		cr_new = cr & ~3UL;
-		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
-			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
-				cr_new |= 1UL;
-			else
-				cr_new |= 2UL;
+		if (MACHINE_HAS_TE) {
+			/* Set/clear transaction execution TDC bits 62/63. */
+			__ctl_store(cr, 2, 2);
+			cr_new = cr & ~3UL;
+			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
+				if (task->thread.per_flags &
+				    PER_FLAG_TE_ABORT_RAND_TEND)
+					cr_new |= 1UL;
+				else
+					cr_new |= 2UL;
+			}
+			if (cr_new != cr)
+				__ctl_load(cr_new, 2, 2);
		}
-		if (cr_new != cr)
-			__ctl_load(cr_new, 2, 2);
	}
#endif
	/* Copy user specified PER registers */
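A note on the shifts in the hunk above: z/Architecture documentation numbers control-register bits from the most significant end, so bit n corresponds to 1UL << (63 - n). That is why the transactional-execution control (CR0 bit 8) appears as 1UL << 55 and the vector enablement control (CR0 bit 46) as 1UL << 17. A minimal user-space sketch of the conversion; cr_bit() is an illustrative helper, not a kernel API:

#include <assert.h>

/* Convert a z/Architecture MSB-0 bit number to the LSB-0 shift used above. */
static unsigned long cr_bit(unsigned int pop_bit)
{
	return 1UL << (63 - pop_bit);
}

int main(void)
{
	assert(cr_bit(8)  == (1UL << 55));	/* CR0.8: transactional execution */
	assert(cr_bit(46) == (1UL << 17));	/* CR0.46: vector enablement */
	return 0;
}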
@@ -84,7 +87,8 @@ void update_cr_regs(struct task_struct *task)
	new.end = thread->per_user.end;
	/* merge TIF_SINGLE_STEP into user specified PER registers. */
-	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
+	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
+	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
@@ -93,6 +97,8 @@ void update_cr_regs(struct task_struct *task)
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}
@@ -923,7 +929,15 @@ static int s390_fpregs_get(struct task_struct *target,
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}
+#ifdef CONFIG_64BIT
+	else if (target->thread.vxrs) {
+		int i;
+		for (i = 0; i < __NUM_VXRS_LOW; i++)
+			target->thread.fp_regs.fprs[i] =
+				*(freg_t *)(target->thread.vxrs + i);
+	}
+#endif
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}
@@ -957,9 +971,20 @@ static int s390_fpregs_set(struct task_struct *target,
				target->thread.fp_regs.fprs,
				offsetof(s390_fp_regs, fprs), -1);
-	if (rc == 0 && target == current) {
-		restore_fp_ctl(&target->thread.fp_regs.fpc);
-		restore_fp_regs(target->thread.fp_regs.fprs);
+	if (rc == 0) {
+		if (target == current) {
+			restore_fp_ctl(&target->thread.fp_regs.fpc);
+			restore_fp_regs(target->thread.fp_regs.fprs);
+		}
+#ifdef CONFIG_64BIT
+		else if (target->thread.vxrs) {
+			int i;
+			for (i = 0; i < __NUM_VXRS_LOW; i++)
+				*(freg_t *)(target->thread.vxrs + i) =
+					target->thread.fp_regs.fprs[i];
+		}
+#endif
	}
	return rc;
@@ -1015,6 +1040,95 @@ static int s390_tdb_set(struct task_struct *target,
	return 0;
}
static int s390_vxrs_active(struct task_struct *target,
const struct user_regset *regset)
{
return !!target->thread.vxrs;
}
static int s390_vxrs_low_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
__u64 vxrs[__NUM_VXRS_LOW];
int i;
if (target->thread.vxrs) {
if (target == current)
save_vx_regs(target->thread.vxrs);
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
} else
memset(vxrs, 0, sizeof(vxrs));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}
static int s390_vxrs_low_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
__u64 vxrs[__NUM_VXRS_LOW];
int i, rc;
if (!target->thread.vxrs) {
rc = alloc_vector_registers(target);
if (rc)
return rc;
} else if (target == current)
save_vx_regs(target->thread.vxrs);
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
if (target == current)
restore_vx_regs(target->thread.vxrs);
}
return rc;
}
static int s390_vxrs_high_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
__vector128 vxrs[__NUM_VXRS_HIGH];
if (target->thread.vxrs) {
if (target == current)
save_vx_regs(target->thread.vxrs);
memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
sizeof(vxrs));
} else
memset(vxrs, 0, sizeof(vxrs));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}
static int s390_vxrs_high_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int rc;
if (!target->thread.vxrs) {
rc = alloc_vector_registers(target);
if (rc)
return rc;
} else if (target == current)
save_vx_regs(target->thread.vxrs);
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
if (rc == 0 && target == current)
restore_vx_regs(target->thread.vxrs);
return rc;
}
#endif
static int s390_system_call_get(struct task_struct *target,
@@ -1038,7 +1152,7 @@ static int s390_system_call_set(struct task_struct *target,
}
static const struct user_regset s390_regsets[] = {
-	[REGSET_GENERAL] = {
+	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
@@ -1046,7 +1160,7 @@ static const struct user_regset s390_regsets[] = {
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
-	[REGSET_FP] = {
+	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
@@ -1054,8 +1168,16 @@ static const struct user_regset s390_regsets[] = {
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
{
.core_note_type = NT_S390_SYSTEM_CALL,
.n = 1,
.size = sizeof(unsigned int),
.align = sizeof(unsigned int),
.get = s390_system_call_get,
.set = s390_system_call_set,
},
#ifdef CONFIG_64BIT
-	[REGSET_LAST_BREAK] = {
+	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
@@ -1063,7 +1185,7 @@ static const struct user_regset s390_regsets[] = {
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
-	[REGSET_TDB] = {
+	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
@@ -1071,15 +1193,25 @@ static const struct user_regset s390_regsets[] = {
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
-#endif
-	[REGSET_SYSTEM_CALL] = {
-		.core_note_type = NT_S390_SYSTEM_CALL,
-		.n = 1,
-		.size = sizeof(unsigned int),
-		.align = sizeof(unsigned int),
-		.get = s390_system_call_get,
-		.set = s390_system_call_set,
+	{
+		.core_note_type = NT_S390_VXRS_LOW,
+		.n = __NUM_VXRS_LOW,
+		.size = sizeof(__u64),
+		.align = sizeof(__u64),
+		.active = s390_vxrs_active,
+		.get = s390_vxrs_low_get,
+		.set = s390_vxrs_low_set,
	},
{
.core_note_type = NT_S390_VXRS_HIGH,
.n = __NUM_VXRS_HIGH,
.size = sizeof(__vector128),
.align = sizeof(__vector128),
.active = s390_vxrs_active,
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
#endif
};
static const struct user_regset_view user_s390_view = {
@@ -1244,7 +1376,7 @@ static int s390_compat_last_break_set(struct task_struct *target,
}
static const struct user_regset s390_compat_regsets[] = {
-	[REGSET_GENERAL] = {
+	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
@@ -1252,7 +1384,7 @@ static const struct user_regset s390_compat_regsets[] = {
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
-	[REGSET_FP] = {
+	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
@@ -1260,7 +1392,15 @@ static const struct user_regset s390_compat_regsets[] = {
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
-	[REGSET_LAST_BREAK] = {
+	{
.core_note_type = NT_S390_SYSTEM_CALL,
.n = 1,
.size = sizeof(compat_uint_t),
.align = sizeof(compat_uint_t),
.get = s390_system_call_get,
.set = s390_system_call_set,
},
{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
@@ -1268,7 +1408,7 @@ static const struct user_regset s390_compat_regsets[] = {
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
-	[REGSET_TDB] = {
+	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
@@ -1276,15 +1416,25 @@ static const struct user_regset s390_compat_regsets[] = {
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
-	[REGSET_SYSTEM_CALL] = {
-		.core_note_type = NT_S390_SYSTEM_CALL,
-		.n = 1,
-		.size = sizeof(compat_uint_t),
-		.align = sizeof(compat_uint_t),
-		.get = s390_system_call_get,
-		.set = s390_system_call_set,
+	{
+		.core_note_type = NT_S390_VXRS_LOW,
+		.n = __NUM_VXRS_LOW,
+		.size = sizeof(__u64),
+		.align = sizeof(__u64),
+		.active = s390_vxrs_active,
+		.get = s390_vxrs_low_get,
+		.set = s390_vxrs_low_set,
},
{
.core_note_type = NT_S390_VXRS_HIGH,
.n = __NUM_VXRS_HIGH,
.size = sizeof(__vector128),
.align = sizeof(__vector128),
.active = s390_vxrs_active,
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
	},
-	[REGSET_GENERAL_EXTENDED] = {
+	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
......
@@ -343,6 +343,9 @@ static void __init setup_lowcore(void)
		__ctl_set_bit(14, 29);
	}
#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -452,8 +455,8 @@ static void __init setup_memory_end(void)
#ifdef CONFIG_64BIT
	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
-	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
-	if (tmp <= (1UL << 42))
+	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
+	if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
		vmax = 1UL << 42;	/* 3-level kernel page table */
	else
		vmax = 1UL << 53;	/* 4-level kernel page table */
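The corrected check charges everything that must fit below the 4 TiB a 3-level kernel page table can map: the linear mapping plus its struct page array (tmp), the vmalloc area, and the modules area. The old code folded vmalloc_size into tmp and ignored MODULES_LEN. A stand-alone sketch of the arithmetic; PAGE_SIZE, MODULES_LEN and the sizeof(struct page) stand-in are placeholder values for illustration, not the kernel's definitions:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define MODULES_LEN	(1UL << 31)	/* placeholder for the s390 modules area */
#define STRUCT_PAGE	64UL		/* placeholder for sizeof(struct page) */

int main(void)
{
	unsigned long memory_end = 1UL << 40;	/* pretend 1 TiB of memory */
	unsigned long vmalloc_size = (128UL << 30) - MODULES_LEN;
	unsigned long tmp = (memory_end / PAGE_SIZE) * (STRUCT_PAGE + PAGE_SIZE);

	if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
		printf("3-level kernel page table\n");
	else
		printf("4-level kernel page table\n");
	return 0;
}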
@@ -765,6 +768,12 @@ static void __init setup_hwcaps(void)
	 */
	if (test_facility(50) && test_facility(73))
		elf_hwcap |= HWCAP_S390_TE;
/*
* Vector extension HWCAP_S390_VXRS is bit 11.
*/
if (test_facility(129))
elf_hwcap |= HWCAP_S390_VXRS;
#endif
	get_cpu_id(&cpu_id);
......
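Once the kernel sets the bit, user space can test for the vector facility through the ELF auxiliary vector. A minimal sketch; the mask is written out as 1UL << 11 because, as the comment above notes, HWCAP_S390_VXRS is bit 11, and the constant's name depends on the libc headers in use:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("vx: %s\n", (hwcap & (1UL << 11)) ? "yes" : "no");
	return 0;
}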
@@ -31,30 +31,117 @@
#include <asm/switch_to.h>
#include "entry.h"
-typedef struct
+/*
* Layout of an old-style signal-frame:
* -----------------------------------------
* | save area (_SIGNAL_FRAMESIZE) |
* -----------------------------------------
* | struct sigcontext |
* | oldmask |
* | _sigregs * |
* -----------------------------------------
* | _sigregs with |
* | _s390_regs_common |
* | _s390_fp_regs |
* -----------------------------------------
* | int signo |
* -----------------------------------------
* | _sigregs_ext with |
* | gprs_high 64 byte (opt) |
* | vxrs_low 128 byte (opt) |
* | vxrs_high 256 byte (opt) |
* | reserved 128 byte (opt) |
* -----------------------------------------
* | __u16 svc_insn |
* -----------------------------------------
* The svc_insn entry with the sigreturn system call opcode does not
* have a fixed position and moves if gprs_high or vxrs exist.
* Future extensions will be added to _sigregs_ext.
*/
struct sigframe
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
	struct sigcontext sc;
	_sigregs sregs;
	int signo;
-	__u8 retcode[S390_SYSCALL_SIZE];
-} sigframe;
+	_sigregs_ext sregs_ext;
+	__u16 svc_insn;		/* Offset of svc_insn is NOT fixed! */
};
-typedef struct
+/*
* Layout of an rt signal-frame:
* -----------------------------------------
* | save area (_SIGNAL_FRAMESIZE) |
* -----------------------------------------
* | svc __NR_rt_sigreturn 2 byte |
* -----------------------------------------
* | struct siginfo |
* -----------------------------------------
* | struct ucontext_extended with |
* | unsigned long uc_flags |
* | struct ucontext *uc_link |
* | stack_t uc_stack |
* | _sigregs uc_mcontext with |
* | _s390_regs_common |
* | _s390_fp_regs |
* | sigset_t uc_sigmask |
* | _sigregs_ext uc_mcontext_ext |
* | gprs_high 64 byte (opt) |
* | vxrs_low 128 byte (opt) |
* | vxrs_high 256 byte (opt)|
* | reserved 128 byte (opt) |
* -----------------------------------------
* Future extensions will be added to _sigregs_ext.
*/
struct rt_sigframe
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
-	__u8 retcode[S390_SYSCALL_SIZE];
+	__u16 svc_insn;
	struct siginfo info;
-	struct ucontext uc;
+	struct ucontext_extended uc;
-} rt_sigframe;
+};
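Because _sigregs_ext (and with it the trailing svc_insn of the old-style frame) is only materialized when the machine has the vector facility, setup_frame() computes the frame size at delivery time and stores the sigreturn opcode at frame + frame_size - 2. A stand-alone sketch with placeholder sizes; only the relative layout matters here, not the exact numbers:

#include <stdio.h>
#include <stddef.h>

struct sigframe_demo {			/* placeholder sizes throughout */
	char save_area[96];		/* __SIGNAL_FRAMESIZE stand-in */
	char sc[24];			/* struct sigcontext stand-in */
	char sregs[360];		/* _sigregs stand-in */
	int signo;
	char sregs_ext[576];		/* _sigregs_ext stand-in, optional */
	unsigned short svc_insn;
};

int main(void)
{
	size_t with_vx = sizeof(struct sigframe_demo);
	size_t without_vx = with_vx - sizeof(((struct sigframe_demo *)0)->sregs_ext);

	/* The svc opcode always sits in the last two bytes of the frame. */
	printf("svc offset: %zu with vx, %zu without\n",
	       with_vx - 2, without_vx - 2);
	return 0;
}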
/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
save_fp_ctl(&current->thread.fp_regs.fpc);
#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
save_vx_regs(current->thread.vxrs);
for (i = 0; i < __NUM_FPRS; i++)
current->thread.fp_regs.fprs[i] =
*(freg_t *)(current->thread.vxrs + i);
} else
#endif
save_fp_regs(current->thread.fp_regs.fprs);
}
/* Load registers after signal return */
static void load_sigregs(void)
{
restore_access_regs(current->thread.acrs);
/* restore_fp_ctl is done in restore_sigregs */
#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
for (i = 0; i < __NUM_FPRS; i++)
*(freg_t *)(current->thread.vxrs + i) =
current->thread.fp_regs.fprs[i];
restore_vx_regs(current->thread.vxrs);
} else
#endif
restore_fp_regs(current->thread.fp_regs.fprs);
}
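The two loops above depend on the architected register overlap: floating point register n is the leftmost 64 bits of vector register n, so copying an freg_t in and out of a __vector128 merges the FPU state with the vector state. A user-space sketch of that overlap; the types are stand-ins for freg_t and __vector128:

#include <stdio.h>
#include <string.h>

struct vector128_demo {
	unsigned long long high;	/* bits 0-63: aliases the FPR */
	unsigned long long low;		/* bits 64-127 */
};

int main(void)
{
	struct vector128_demo vr = { 0, 0x1122334455667788ULL };
	double fpr = 2.5;		/* freg_t stand-in */

	/* mirrors: *(freg_t *)(vxrs + i) = fprs[i] */
	memcpy(&vr, &fpr, sizeof(fpr));
	printf("low half untouched: %#llx\n", vr.low);
	return 0;
}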
/* Returns non-zero on fault. */
static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
	_sigregs user_sregs;
-	save_access_regs(current->thread.acrs);
	/* Copy a 'clean' PSW mask to the user to avoid leaking
	   information about whether PER is currently on. */
	user_sregs.regs.psw.mask = PSW_USER_BITS |
@@ -63,12 +150,6 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
	       sizeof(user_sregs.regs.acrs));
-	/*
-	 * We have to store the fp registers to current->thread.fp_regs
-	 * to merge them with the emulated registers.
-	 */
-	save_fp_ctl(&current->thread.fp_regs.fpc);
-	save_fp_regs(current->thread.fp_regs.fprs);
	memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
	       sizeof(user_sregs.fpregs));
	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
@@ -107,20 +188,64 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
	memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
-	restore_access_regs(current->thread.acrs);
	memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
	       sizeof(current->thread.fp_regs));
-	restore_fp_regs(current->thread.fp_regs.fprs);
	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
	return 0;
}
/* Returns non-zero on fault. */
static int save_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
/* Save vector registers to signal stack */
if (current->thread.vxrs) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) ||
__copy_to_user(&sregs_ext->vxrs_high,
current->thread.vxrs + __NUM_VXRS_LOW,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
#endif
return 0;
}
static int restore_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
/* Restore vector registers from signal stack */
if (current->thread.vxrs) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
&sregs_ext->vxrs_high,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
}
#endif
return 0;
}
SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
-	sigframe __user *frame = (sigframe __user *)regs->gprs[15];
+	struct sigframe __user *frame =
+		(struct sigframe __user *) regs->gprs[15];
	sigset_t set;
	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
@@ -128,6 +253,9 @@ SYSCALL_DEFINE0(sigreturn)
	set_current_blocked(&set);
	if (restore_sigregs(regs, &frame->sregs))
		goto badframe;
if (restore_sigregs_ext(regs, &frame->sregs_ext))
goto badframe;
load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV, current);
@@ -137,27 +265,26 @@ SYSCALL_DEFINE0(sigreturn)
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
-	rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
+	struct rt_sigframe __user *frame =
+		(struct rt_sigframe __user *)regs->gprs[15];
	sigset_t set;
	if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);
+	if (restore_altstack(&frame->uc.uc_stack))
+		goto badframe;
	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
		goto badframe;
-	if (restore_altstack(&frame->uc.uc_stack))
+	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
		goto badframe;
+	load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
* Set up a signal frame.
*/
/*
 * Determine which stack to use..
 */
@@ -195,39 +322,63 @@ static inline int map_signal(int sig)
static int setup_frame(int sig, struct k_sigaction *ka,
		       sigset_t *set, struct pt_regs * regs)
{
-	sigframe __user *frame;
-	frame = get_sigframe(ka, regs, sizeof(sigframe));
+	struct sigframe __user *frame;
+	struct sigcontext sc;
+	unsigned long restorer;
+	size_t frame_size;
	/*
	 * gprs_high are only present for a 31-bit task running on
	 * a 64-bit kernel (see compat_signal.c) but the space for
	 * gprs_high needs to be allocated if vector registers are
	 * included in the signal frame on a 31-bit system.
	 */
frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
if (MACHINE_HAS_VX)
frame_size += sizeof(frame->sregs_ext);
frame = get_sigframe(ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;
-	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
		return -EFAULT;
/* Create struct sigcontext on the signal stack */
memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE);
sc.sregs = (_sigregs __user __force *) &frame->sregs;
if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
/* Store registers needed to create the signal frame */
store_sigregs();
/* Create _sigregs on the signal stack */
	if (save_sigregs(regs, &frame->sregs))
		return -EFAULT;
-	if (__put_user(&frame->sregs, &frame->sc.sregs))
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
return -EFAULT;
/* Create _sigregs_ext on the signal stack */
if (save_sigregs_ext(regs, &frame->sregs_ext))
		return -EFAULT;
	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace. */
	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (unsigned long)
-			ka->sa.sa_restorer | PSW_ADDR_AMODE;
+		restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
	} else {
-		regs->gprs[14] = (unsigned long)
-			frame->retcode | PSW_ADDR_AMODE;
-		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
-			       (u16 __user *)(frame->retcode)))
+		/* Signal frames without vector registers are short! */
+		__u16 __user *svc = (void *) frame + frame_size - 2;
+		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
			return -EFAULT;
+		restorer = (unsigned long) svc | PSW_ADDR_AMODE;
	}
-	/* Set up backchain. */
-	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
-		return -EFAULT;
	/* Set up registers for signal handler */
+	regs->gprs[14] = restorer;
	regs->gprs[15] = (unsigned long) frame;
	/* Force default amode and default user address space control. */
	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
@@ -247,54 +398,69 @@ static int setup_frame(int sig, struct k_sigaction *ka,
	regs->gprs[5] = regs->int_parm_long;
	regs->gprs[6] = task_thread_info(current)->last_break;
}
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
return -EFAULT;
	return 0;
}
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
-	int err = 0;
-	rt_sigframe __user *frame;
-	frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe));
+	struct rt_sigframe __user *frame;
+	unsigned long uc_flags, restorer;
+	size_t frame_size;
+	frame_size = sizeof(struct rt_sigframe) - sizeof(_sigregs_ext);
	/*
	 * gprs_high are only present for a 31-bit task running on
	 * a 64-bit kernel (see compat_signal.c) but the space for
	 * gprs_high needs to be allocated if vector registers are
	 * included in the signal frame on a 31-bit system.
	 */
uc_flags = 0;
#ifdef CONFIG_64BIT
if (MACHINE_HAS_VX) {
frame_size += sizeof(_sigregs_ext);
if (current->thread.vxrs)
uc_flags |= UC_VXRS;
}
#endif
frame = get_sigframe(&ksig->ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;
-	if (copy_siginfo_to_user(&frame->info, &ksig->info))
-		return -EFAULT;
-	/* Create the ucontext. */
-	err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(NULL, &frame->uc.uc_link);
-	err |= __save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
-	err |= save_sigregs(regs, &frame->uc.uc_mcontext);
-	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-	if (err)
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
		return -EFAULT;
	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (unsigned long)
+		restorer = (unsigned long)
			ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
	} else {
-		regs->gprs[14] = (unsigned long)
-			frame->retcode | PSW_ADDR_AMODE;
-		if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
-			       (u16 __user *)(frame->retcode)))
+		__u16 __user *svc = &frame->svc_insn;
+		if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
			return -EFAULT;
+		restorer = (unsigned long) svc | PSW_ADDR_AMODE;
	}
-	/* Set up backchain. */
-	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
+	/* Create siginfo on the signal stack */
+	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		return -EFAULT;
/* Store registers needed to create the signal frame */
store_sigregs();
/* Create ucontext on the signal stack. */
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(NULL, &frame->uc.uc_link) ||
__save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs(regs, &frame->uc.uc_mcontext) ||
__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
save_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT; return -EFAULT;
	/* Set up registers for signal handler */
+	regs->gprs[14] = restorer;
	regs->gprs[15] = (unsigned long) frame;
	/* Force default amode and default user address space control. */
	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
......
@@ -45,6 +45,7 @@
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"
enum {
@@ -82,7 +83,8 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
/*
 * Signal processor helper functions.
 */
-static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
+static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
+				    u32 *status)
{
	int cc;
@@ -178,6 +180,9 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
		goto out;
	}
#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
@@ -333,12 +338,6 @@ int smp_vcpu_scheduled(int cpu)
	return pcpu_running(pcpu_devices + cpu);
}
void smp_yield(void)
{
if (MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
}
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
@@ -517,35 +516,53 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
-	struct save_area *save_area;
+	struct save_area_ext *sa_ext;
+	unsigned long vx_sa;
	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
-	save_area = dump_save_area_create(cpu);
-	if (!save_area)
+	sa_ext = dump_save_area_create(cpu);
+	if (!sa_ext)
		panic("could not allocate memory for save area\n");
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
-		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
+		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
if (MACHINE_HAS_VX)
save_vx_regs_safe(sa_ext->vx_regs);
		return;
	}
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
-	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
+	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
if (!MACHINE_HAS_VX)
return;
/* Get the VX registers */
vx_sa = __get_free_page(GFP_KERNEL);
if (!vx_sa)
panic("could not allocate memory for VX save area\n");
__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
free_page(vx_sa);
}
int smp_store_status(int cpu)
{
+	unsigned long vx_sa;
	struct pcpu *pcpu;
	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
if (!MACHINE_HAS_VX)
return 0;
vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
vx_sa, NULL);
	return 0;
}
@@ -667,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
	cpu_init();
	preempt_disable();
	init_cpu_timer();
-	init_cpu_vtimer();
+	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
@@ -726,6 +743,7 @@ int __cpu_disable(void)
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
+	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}
@@ -898,42 +916,6 @@ static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long idle_count;
unsigned int sequence;
do {
sequence = ACCESS_ONCE(idle->sequence);
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long now, idle_time, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
......
@@ -232,6 +232,19 @@ void update_vsyscall(struct timekeeper *tk)
		vdso_data->wtom_clock_nsec -= nsecps;
		vdso_data->wtom_clock_sec++;
	}
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec =
(long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
vdso_data->wtom_coarse_sec =
vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_coarse_nsec =
vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
vdso_data->wtom_coarse_sec++;
}
	vdso_data->tk_mult = tk->tkr.mult;
	vdso_data->tk_shift = tk->tkr.shift;
	smp_wmb();
......
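The coarse fields added above mirror what the generic timekeeping code publishes: the coarse monotonic time is the coarse wall-clock time plus wall_to_monotonic, with nanoseconds carried into seconds so the vdso never hands out a tv_nsec of a full second or more. A stand-alone sketch of the normalization with made-up values:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long xtime_sec = 1000, xtime_nsec = 900000000;	/* made-up */
	long wtom_sec = 5, wtom_nsec = 300000000;	/* made-up */
	long sec = xtime_sec + wtom_sec;
	long nsec = xtime_nsec + wtom_nsec;

	while (nsec >= NSEC_PER_SEC) {	/* carry, as in the hunk above */
		nsec -= NSEC_PER_SEC;
		sec++;
	}
	printf("coarse monotonic: %ld.%09ld\n", sec, nsec);
	return 0;
}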
@@ -464,15 +464,17 @@ static struct sched_domain_topology_level s390_topology[] = {
static int __init topology_init(void)
{
-	if (!MACHINE_HAS_TOPOLOGY) {
+	if (MACHINE_HAS_TOPOLOGY)
+		set_topology_timer();
+	else
		topology_update_polarization_simple();
-		goto out;
-	}
-	set_topology_timer();
-out:
-	set_sched_topology(s390_topology);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
static int __init early_topology_init(void)
{
set_sched_topology(s390_topology);
return 0;
}
early_initcall(early_topology_init);
@@ -18,6 +18,8 @@
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/switch_to.h>
#include "entry.h"
int show_unhandled_signals = 1;
@@ -58,15 +60,10 @@ int is_valid_bugaddr(unsigned long addr)
	return 1;
}
-static void __kprobes do_trap(struct pt_regs *regs,
-		int si_signo, int si_code, char *str)
+void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
	siginfo_t info;
-	if (notify_die(DIE_TRAP, str, regs, 0,
-		       regs->int_code, si_signo) == NOTIFY_STOP)
-		return;
	if (user_mode(regs)) {
		info.si_signo = si_signo;
		info.si_errno = 0;
@@ -90,6 +87,15 @@ static void __kprobes do_trap(struct pt_regs *regs,
	}
}
static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code,
char *str)
{
if (notify_die(DIE_TRAP, str, regs, 0,
regs->int_code, si_signo) == NOTIFY_STOP)
return;
do_report_trap(regs, si_signo, si_code, str);
}
void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;
@@ -178,6 +184,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
int is_uprobe_insn = 0;
	int signal = 0;
	location = get_trap_ip(regs);
@@ -194,6 +201,10 @@ void __kprobes illegal_op(struct pt_regs *regs)
			force_sig_info(SIGTRAP, &info, current);
		} else
			signal = SIGILL;
#ifdef CONFIG_UPROBES
} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
is_uprobe_insn = 1;
#endif
#ifdef CONFIG_MATHEMU
	} else if (opcode[0] == 0xb3) {
		if (get_user(*((__u16 *) (opcode+2)), location+1))
@@ -219,11 +230,13 @@ void __kprobes illegal_op(struct pt_regs *regs)
#endif
	} else
		signal = SIGILL;
-	} else {
+	}
	/*
-	 * If we get an illegal op in kernel mode, send it through the
-	 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
+	 * We got either an illegal op in kernel mode, or user space trapped
+	 * on a uprobes illegal instruction. See if kprobes or uprobes picks
+	 * it up. If not, SIGILL.
	 */
+	if (is_uprobe_insn || !user_mode(regs)) {
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
@@ -292,6 +305,74 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
		"specification exception");
#endif
#ifdef CONFIG_64BIT
int alloc_vector_registers(struct task_struct *tsk)
{
__vector128 *vxrs;
int i;
/* Allocate vector register save area. */
vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
GFP_KERNEL|__GFP_REPEAT);
if (!vxrs)
return -ENOMEM;
preempt_disable();
if (tsk == current)
save_fp_regs(tsk->thread.fp_regs.fprs);
/* Copy the 16 floating point registers */
for (i = 0; i < 16; i++)
*(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i];
tsk->thread.vxrs = vxrs;
if (tsk == current) {
__ctl_set_bit(0, 17);
restore_vx_regs(vxrs);
}
preempt_enable();
return 0;
}
void vector_exception(struct pt_regs *regs)
{
int si_code, vic;
if (!MACHINE_HAS_VX) {
do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
return;
}
/* get vector interrupt code from fpc */
asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
si_code = FPE_FLTINV;
break;
case 2: /* division by zero */
si_code = FPE_FLTDIV;
break;
case 3: /* overflow */
si_code = FPE_FLTOVF;
break;
case 4: /* underflow */
si_code = FPE_FLTUND;
break;
case 5: /* inexact */
si_code = FPE_FLTRES;
break;
default: /* unknown cause */
si_code = 0;
}
do_trap(regs, SIGFPE, si_code, "vector exception");
}
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
return 1;
}
__setup("novx", disable_vector_extension);
#endif
void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
@@ -357,6 +438,18 @@ void data_exception(struct pt_regs *regs)
		}
	}
#endif
#ifdef CONFIG_64BIT
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
alloc_vector_registers(current);
/* Vector data exception is suppressing, rewind psw. */
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
clear_pt_regs_flag(regs, PIF_PER_TRAP);
return;
}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
......
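The data_exception() hunk above implements lazy enablement: a task may execute its first vector instruction before a save area exists, the CPU then delivers a vector-data exception with DXC 0xfe, and the kernel allocates the save area and rewinds the PSW so the instruction is simply retried. A user-space sketch of just the gating condition; FPC_DXC_MASK matches the kernel's value, the rest is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define FPC_DXC_MASK 0x0000ff00U

static bool lazy_vx_enable(unsigned int fpc, bool machine_has_vx, bool has_vxrs)
{
	/* vector data exception, no save area allocated yet */
	return machine_has_vx && !has_vxrs && (fpc & FPC_DXC_MASK) == 0xfe00;
}

int main(void)
{
	printf("%d\n", lazy_vx_enable(0xfe00, true, false));	/* 1: allocate + retry */
	printf("%d\n", lazy_vx_enable(0x0100, true, false));	/* 0: normal SIGFPE path */
	return 0;
}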
/*
* User-space Probes (UProbes) for s390
*
* Copyright IBM Corp. 2014
* Author(s): Jan Willeke,
*/
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/kdebug.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/dis.h>
#include "entry.h"
#define UPROBE_TRAP_NR UINT_MAX
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long addr)
{
return probe_is_prohibited_opcode(auprobe->insn);
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
return -EINVAL;
if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
return -EINVAL;
clear_pt_regs_flag(regs, PIF_PER_TRAP);
auprobe->saved_per = psw_bits(regs->psw).r;
auprobe->saved_int_code = regs->int_code;
regs->int_code = UPROBE_TRAP_NR;
regs->psw.addr = current->utask->xol_vaddr;
set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
update_cr_regs(current);
return 0;
}
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
struct pt_regs *regs = task_pt_regs(tsk);
if (regs->int_code != UPROBE_TRAP_NR)
return true;
return false;
}
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int fixup = probe_get_fixup_type(auprobe->insn);
struct uprobe_task *utask = current->utask;
clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
update_cr_regs(current);
psw_bits(regs->psw).r = auprobe->saved_per;
regs->int_code = auprobe->saved_int_code;
if (fixup & FIXUP_PSW_NORMAL)
regs->psw.addr += utask->vaddr - utask->xol_vaddr;
if (fixup & FIXUP_RETURN_REGISTER) {
int reg = (auprobe->insn[0] & 0xf0) >> 4;
regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
}
if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
int ilen = insn_length(auprobe->insn[0] >> 8);
if (regs->psw.addr - utask->xol_vaddr == ilen)
regs->psw.addr = utask->vaddr + ilen;
}
/* If per tracing was active generate trap */
if (regs->psw.mask & PSW_MASK_PER)
do_per_trap(regs);
return 0;
}
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct die_args *args = data;
struct pt_regs *regs = args->regs;
if (!user_mode(regs))
return NOTIFY_DONE;
if (regs->int_code & 0x200) /* Trap during transaction */
return NOTIFY_DONE;
switch (val) {
case DIE_BPT:
if (uprobe_pre_sstep_notifier(regs))
return NOTIFY_STOP;
break;
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
default:
break;
}
return NOTIFY_DONE;
}
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
regs->int_code = auprobe->saved_int_code;
regs->psw.addr = current->utask->vaddr;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
struct pt_regs *regs)
{
unsigned long orig;
orig = regs->gprs[14];
regs->gprs[14] = trampoline;
return orig;
}
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
psw->addr = __rewind_psw(*psw, -len);
}
#define EMU_ILLEGAL_OP 1
#define EMU_SPECIFICATION 2
#define EMU_ADDRESSING 3
#define emu_load_ril(ptr, output) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
else \
*(output) = input; \
__rc; \
})
#define emu_store_ril(ptr, input) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (put_user(*(input), ptr)) \
__rc = EMU_ADDRESSING; \
__rc; \
})
#define emu_cmp_ril(regs, ptr, cmp) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
else if (input > *(cmp)) \
psw_bits((regs)->psw).cc = 1; \
else if (input < *(cmp)) \
psw_bits((regs)->psw).cc = 2; \
else \
psw_bits((regs)->psw).cc = 0; \
__rc; \
})
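Each emu_*_ril() helper above reports EMU_ILLEGAL_OP when the general-instructions-extension facility (bit 34) is missing, EMU_SPECIFICATION for a misaligned operand, and EMU_ADDRESSING when the user access faults; emu_cmp_ril() additionally sets the condition code the way the compare instructions do: 0 operands equal, 1 first operand low, 2 first operand high. A user-space analogue of that condition-code logic, with the facility and access checks omitted:

#include <stdio.h>

/* cc for "compare register (first operand) with memory (second operand)" */
static int cmp_cc(long reg, long mem)
{
	if (mem > reg)
		return 1;	/* first operand low */
	if (mem < reg)
		return 2;	/* first operand high */
	return 0;		/* operands equal */
}

int main(void)
{
	printf("%d %d %d\n", cmp_cc(1, 1), cmp_cc(1, 2), cmp_cc(1, 0));
	return 0;
}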
struct insn_ril {
u8 opc0;
u8 reg : 4;
u8 opc1 : 4;
s32 disp;
} __packed;
union split_register {
u64 u64;
u32 u32[2];
u16 u16[4];
s64 s64;
s32 s32[2];
s16 s16[4];
};
/*
* pc relative instructions are emulated, since parameters may not be
* accessible from the xol area due to range limitations.
*/
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
union split_register *rx;
struct insn_ril *insn;
unsigned int ilen;
void *uptr;
int rc = 0;
insn = (struct insn_ril *) &auprobe->insn;
rx = (union split_register *) &regs->gprs[insn->reg];
uptr = (void *)(regs->psw.addr + (insn->disp * 2));
ilen = insn_length(insn->opc0);
switch (insn->opc0) {
case 0xc0:
switch (insn->opc1) {
case 0x00: /* larl */
rx->u64 = (unsigned long)uptr;
break;
}
break;
case 0xc4:
switch (insn->opc1) {
case 0x02: /* llhrl */
rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
break;
case 0x04: /* lghrl */
rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
break;
case 0x05: /* lhrl */
rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
break;
case 0x06: /* llghrl */
rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
break;
case 0x08: /* lgrl */
rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
break;
case 0x0c: /* lgfrl */
rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
break;
case 0x0d: /* lrl */
rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
break;
case 0x0e: /* llgfrl */
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
break;
case 0x07: /* sthrl */
rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
break;
case 0x0b: /* stgrl */
rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
break;
case 0x0f: /* strl */
rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
case 0xc6:
switch (insn->opc1) {
case 0x02: /* pfdrl */
if (!test_facility(34))
rc = EMU_ILLEGAL_OP;
break;
case 0x04: /* cghrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
break;
case 0x05: /* chrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
break;
case 0x06: /* clghrl */
rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
break;
case 0x07: /* clhrl */
rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
break;
case 0x08: /* cgrl */
rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
break;
case 0x0a: /* clgrl */
rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0c: /* cgfrl */
rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
break;
case 0x0d: /* crl */
rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
break;
case 0x0e: /* clgfrl */
rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
break;
case 0x0f: /* clrl */
rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
}
adjust_psw_addr(&regs->psw, ilen);
switch (rc) {
case EMU_ILLEGAL_OP:
regs->int_code = ilen << 16 | 0x0001;
do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
break;
case EMU_SPECIFICATION:
regs->int_code = ilen << 16 | 0x0006;
do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
break;
case EMU_ADDRESSING:
regs->int_code = ilen << 16 | 0x0005;
do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
break;
}
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
!is_compat_task())) {
regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
return true;
}
if (probe_is_insn_relative_long(auprobe->insn)) {
handle_insn_ril(auprobe, regs);
return true;
}
return false;
}
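A note on the arithmetic above: every RIL-format instruction carries a signed 32-bit displacement counted in halfwords, so handle_insn_ril() computes the operand address as psw.addr + insn->disp * 2. The following user-space sketch decodes the displacement straight from the instruction bytes (avoiding bitfield layout assumptions); the sample encoding and helper names are illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Decode the signed halfword displacement from a 6-byte RIL insn. */
static int32_t ril_disp(const uint8_t *insn)
{
	/* bytes 2..5 hold the big-endian 32-bit immediate */
	return (int32_t)((uint32_t)insn[2] << 24 | (uint32_t)insn[3] << 16 |
			 (uint32_t)insn[4] << 8  | (uint32_t)insn[5]);
}

int main(void)
{
	/* larl %r1,.+0x1000: opcode c0, R1=1/opc1=0, disp 0x800 halfwords */
	const uint8_t larl[6] = { 0xc0, 0x10, 0x00, 0x00, 0x08, 0x00 };
	uint64_t psw_addr = 0x4000;

	/* same computation as handle_insn_ril(): addr + disp * 2 */
	printf("target = 0x%llx\n",
	       (unsigned long long)(psw_addr + (int64_t)ril_disp(larl) * 2));
	return 0;
}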
...@@ -19,14 +19,20 @@ ...@@ -19,14 +19,20 @@
.type __kernel_clock_getres,@function .type __kernel_clock_getres,@function
__kernel_clock_getres: __kernel_clock_getres:
.cfi_startproc .cfi_startproc
basr %r1,0
la %r1,4f-.(%r1)
chi %r2,__CLOCK_REALTIME chi %r2,__CLOCK_REALTIME
je 0f je 0f
chi %r2,__CLOCK_MONOTONIC chi %r2,__CLOCK_MONOTONIC
je 0f
la %r1,5f-4f(%r1)
chi %r2,__CLOCK_REALTIME_COARSE
je 0f
chi %r2,__CLOCK_MONOTONIC_COARSE
jne 3f jne 3f
0: ltr %r3,%r3 0: ltr %r3,%r3
jz 2f /* res == NULL */ jz 2f /* res == NULL */
basr %r1,0 1: l %r0,0(%r1)
1: l %r0,4f-1b(%r1)
xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
st %r0,4(%r3) /* store tp->tv_nsec */ st %r0,4(%r3) /* store tp->tv_nsec */
2: lhi %r2,0 2: lhi %r2,0
...@@ -35,5 +41,6 @@ __kernel_clock_getres: ...@@ -35,5 +41,6 @@ __kernel_clock_getres:
svc 0 svc 0
br %r14 br %r14
4: .long __CLOCK_REALTIME_RES 4: .long __CLOCK_REALTIME_RES
5: .long __CLOCK_COARSE_RES
.cfi_endproc .cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres .size __kernel_clock_getres,.-__kernel_clock_getres
...@@ -21,8 +21,12 @@ __kernel_clock_gettime: ...@@ -21,8 +21,12 @@ __kernel_clock_gettime:
.cfi_startproc .cfi_startproc
basr %r5,0 basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */ 0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME_COARSE
je 10f
chi %r2,__CLOCK_REALTIME chi %r2,__CLOCK_REALTIME
je 11f je 11f
chi %r2,__CLOCK_MONOTONIC_COARSE
je 9f
chi %r2,__CLOCK_MONOTONIC chi %r2,__CLOCK_MONOTONIC
jne 19f jne 19f
...@@ -30,8 +34,8 @@ __kernel_clock_gettime: ...@@ -30,8 +34,8 @@ __kernel_clock_gettime:
1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */ tml %r4,0x0001 /* pending update ? loop */
jnz 1b jnz 1b
stck 24(%r15) /* Store TOD clock */ stcke 24(%r15) /* Store TOD clock */
lm %r0,%r1,24(%r15) lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5) sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f brc 3,2f
...@@ -68,12 +72,32 @@ __kernel_clock_gettime: ...@@ -68,12 +72,32 @@ __kernel_clock_gettime:
lhi %r2,0 lhi %r2,0
br %r14 br %r14
/* CLOCK_MONOTONIC_COARSE */
9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 9b
l %r2,__VDSO_WTOM_CRS_SEC+4(%r5)
l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 9b
j 8b
/* CLOCK_REALTIME_COARSE */
10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 10b
l %r2,__VDSO_XTIME_CRS_SEC+4(%r5)
l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 10b
j 17f
/* CLOCK_REALTIME */ /* CLOCK_REALTIME */
11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */ tml %r4,0x0001 /* pending update ? loop */
jnz 11b jnz 11b
stck 24(%r15) /* Store TOD clock */ stcke 24(%r15) /* Store TOD clock */
lm %r0,%r1,24(%r15) lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5) sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f brc 3,12f
......
...@@ -29,8 +29,8 @@ __kernel_gettimeofday: ...@@ -29,8 +29,8 @@ __kernel_gettimeofday:
l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */ tml %r4,0x0001 /* pending update ? loop */
jnz 1b jnz 1b
stck 24(%r15) /* Store TOD clock */ stcke 24(%r15) /* Store TOD clock */
lm %r0,%r1,24(%r15) lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5) sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f brc 3,3f
......
...@@ -19,6 +19,12 @@ ...@@ -19,6 +19,12 @@
.type __kernel_clock_getres,@function .type __kernel_clock_getres,@function
__kernel_clock_getres: __kernel_clock_getres:
.cfi_startproc .cfi_startproc
larl %r1,4f
cghi %r2,__CLOCK_REALTIME_COARSE
je 0f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 0f
larl %r1,3f
cghi %r2,__CLOCK_REALTIME cghi %r2,__CLOCK_REALTIME
je 0f je 0f
cghi %r2,__CLOCK_MONOTONIC cghi %r2,__CLOCK_MONOTONIC
...@@ -32,7 +38,6 @@ __kernel_clock_getres: ...@@ -32,7 +38,6 @@ __kernel_clock_getres:
jz 2f jz 2f
0: ltgr %r3,%r3 0: ltgr %r3,%r3
jz 1f /* res == NULL */ jz 1f /* res == NULL */
larl %r1,3f
lg %r0,0(%r1) lg %r0,0(%r1)
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_nsec */ stg %r0,8(%r3) /* store tp->tv_nsec */
...@@ -42,5 +47,6 @@ __kernel_clock_getres: ...@@ -42,5 +47,6 @@ __kernel_clock_getres:
svc 0 svc 0
br %r14 br %r14
3: .quad __CLOCK_REALTIME_RES 3: .quad __CLOCK_REALTIME_RES
4: .quad __CLOCK_COARSE_RES
.cfi_endproc .cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres .size __kernel_clock_getres,.-__kernel_clock_getres
...@@ -20,12 +20,16 @@ ...@@ -20,12 +20,16 @@
__kernel_clock_gettime: __kernel_clock_gettime:
.cfi_startproc .cfi_startproc
larl %r5,_vdso_data larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME_COARSE
je 4f
cghi %r2,__CLOCK_REALTIME cghi %r2,__CLOCK_REALTIME
je 5f je 5f
cghi %r2,__CLOCK_THREAD_CPUTIME_ID cghi %r2,__CLOCK_THREAD_CPUTIME_ID
je 9f je 9f
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f je 9f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 3f
cghi %r2,__CLOCK_MONOTONIC cghi %r2,__CLOCK_MONOTONIC
jne 12f jne 12f
...@@ -33,10 +37,10 @@ __kernel_clock_gettime: ...@@ -33,10 +37,10 @@ __kernel_clock_gettime:
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */ tmll %r4,0x0001 /* pending update ? loop */
jnz 0b jnz 0b
stck 48(%r15) /* Store TOD clock */ stcke 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r0,__VDSO_WTOM_SEC(%r5) lg %r0,__VDSO_WTOM_SEC(%r5)
lg %r1,48(%r15) lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_WTOM_NSEC(%r5) alg %r1,__VDSO_WTOM_NSEC(%r5)
...@@ -54,13 +58,33 @@ __kernel_clock_gettime: ...@@ -54,13 +58,33 @@ __kernel_clock_gettime:
lghi %r2,0 lghi %r2,0
br %r14 br %r14
/* CLOCK_MONOTONIC_COARSE */
3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 3b
lg %r0,__VDSO_WTOM_CRS_SEC(%r5)
lg %r1,__VDSO_WTOM_CRS_NSEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 3b
j 2b
/* CLOCK_REALTIME_COARSE */
4: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 4b
lg %r0,__VDSO_XTIME_CRS_SEC(%r5)
lg %r1,__VDSO_XTIME_CRS_NSEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 4b
j 7f
/* CLOCK_REALTIME */ /* CLOCK_REALTIME */
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */ tmll %r4,0x0001 /* pending update ? loop */
jnz 5b jnz 5b
stck 48(%r15) /* Store TOD clock */ stcke 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,48(%r15) lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
......
...@@ -28,8 +28,8 @@ __kernel_gettimeofday: ...@@ -28,8 +28,8 @@ __kernel_gettimeofday:
lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */ tmll %r4,0x0001 /* pending update ? loop */
jnz 0b jnz 0b
stck 48(%r15) /* Store TOD clock */ stcke 48(%r15) /* Store TOD clock */
lg %r1,48(%r15) lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
......
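All of the vdso hunks above make the same pair of changes: stck becomes stcke, and the following loads move up one byte (24(%r15) to 25(%r15), 48(%r15) to 49(%r15)). STCKE stores a 16-byte extended TOD value whose byte 0 is the epoch index; bytes 1..8 carry the same bits STCK would have stored, so skipping the first byte preserves the old arithmetic. A small sketch of that extraction, assuming an s390-style big-endian buffer (the printed byte order on other hosts will follow host endianness):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Recover the 8-byte STCK-style clock from a 16-byte STCKE result. */
static uint64_t stck_from_stcke(const uint8_t stcke[16])
{
	uint64_t clock;

	memcpy(&clock, stcke + 1, sizeof(clock));  /* skip epoch-index byte */
	return clock;  /* big-endian on s390, matching the lm/lg loads */
}

int main(void)
{
	uint8_t buf[16] = { 0xff, 0x01, 0x02, 0x03, 0x04,
			    0x05, 0x06, 0x07, 0x08 };

	printf("%016llx\n", (unsigned long long)stck_from_stcke(buf));
	return 0;
}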
...@@ -6,27 +6,18 @@ ...@@ -6,27 +6,18 @@
*/ */
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/timex.h> #include <linux/timex.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/time.h> #include <linux/time.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h> #include <asm/cputime.h>
#include <asm/vtimer.h> #include <asm/vtimer.h>
#include <asm/vtime.h> #include <asm/vtime.h>
#include <asm/irq.h>
#include "entry.h"
static void virt_timer_expire(void); static void virt_timer_expire(void);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
static LIST_HEAD(virt_timer_list); static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock); static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current; static atomic64_t virt_timer_current;
...@@ -152,49 +143,6 @@ void vtime_account_system(struct task_struct *tsk) ...@@ -152,49 +143,6 @@ void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter"))); __attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system); EXPORT_SYMBOL_GPL(vtime_account_system);
void __kprobes vtime_stop_cpu(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
unsigned long long idle_time;
unsigned long psw_mask;
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
idle->nohz_delay = 0;
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
/* Account time spent with enabled wait psw loaded as idle time. */
idle->sequence++;
smp_wmb();
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(idle_time);
smp_wmb();
idle->sequence++;
}
cputime64_t s390_get_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
/* /*
* Sorted add to a list. List is linear searched until first bigger * Sorted add to a list. List is linear searched until first bigger
* element is found. * element is found.
...@@ -372,31 +320,8 @@ EXPORT_SYMBOL(del_virt_timer); ...@@ -372,31 +320,8 @@ EXPORT_SYMBOL(del_virt_timer);
/* /*
* Start the virtual CPU timer on the current CPU. * Start the virtual CPU timer on the current CPU.
*/ */
void init_cpu_vtimer(void) void vtime_init(void)
{ {
/* set initial cpu timer */ /* set initial cpu timer */
set_vtimer(VTIMER_MAX_SLICE); set_vtimer(VTIMER_MAX_SLICE);
} }
static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
struct s390_idle_data *idle;
long cpu = (long) hcpu;
idle = &per_cpu(s390_idle, cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DYING:
idle->nohz_delay = 0;
default:
break;
}
return NOTIFY_OK;
}
void __init vtime_init(void)
{
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
cpu_notifier(s390_nohz_notify, 0);
}
...@@ -6,3 +6,5 @@ lib-y += delay.o string.o uaccess.o find.o ...@@ -6,3 +6,5 @@ lib-y += delay.o string.o uaccess.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_SMP) += spinlock.o lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
...@@ -43,7 +43,7 @@ static void __udelay_disabled(unsigned long long usecs) ...@@ -43,7 +43,7 @@ static void __udelay_disabled(unsigned long long usecs)
lockdep_off(); lockdep_off();
do { do {
set_clock_comparator(end); set_clock_comparator(end);
vtime_stop_cpu(); enabled_wait();
} while (get_tod_clock_fast() < end); } while (get_tod_clock_fast() < end);
lockdep_on(); lockdep_on();
__ctl_load(cr0, 0, 0); __ctl_load(cr0, 0, 0);
...@@ -62,7 +62,7 @@ static void __udelay_enabled(unsigned long long usecs) ...@@ -62,7 +62,7 @@ static void __udelay_enabled(unsigned long long usecs)
clock_saved = local_tick_disable(); clock_saved = local_tick_disable();
set_clock_comparator(end); set_clock_comparator(end);
} }
vtime_stop_cpu(); enabled_wait();
if (clock_saved) if (clock_saved)
local_tick_enable(clock_saved); local_tick_enable(clock_saved);
} while (get_tod_clock_fast() < end); } while (get_tod_clock_fast() < end);
......
/*
* Common helper functions for kprobes and uprobes
*
* Copyright IBM Corp. 2014
*/
#include <linux/kprobes.h>
#include <asm/dis.h>
int probe_is_prohibited_opcode(u16 *insn)
{
if (!is_known_insn((unsigned char *)insn))
return -EINVAL;
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
case 0x83: /* diag */
case 0x44: /* ex */
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x00: /* exrl */
return -EINVAL;
}
}
switch (insn[0]) {
case 0x0101: /* pr */
case 0xb25a: /* bsa */
case 0xb240: /* bakr */
case 0xb258: /* bsg */
case 0xb218: /* pc */
case 0xb228: /* pt */
case 0xb98d: /* epsw */
case 0xe560: /* tbegin */
case 0xe561: /* tbeginc */
case 0xb2f8: /* tend */
return -EINVAL;
}
return 0;
}
int probe_get_fixup_type(u16 *insn)
{
/* default fixup method */
int fixup = FIXUP_PSW_NORMAL;
switch (insn[0] >> 8) {
case 0x05: /* balr */
case 0x0d: /* basr */
fixup = FIXUP_RETURN_REGISTER;
/* if r2 = 0, no branch will be taken */
if ((insn[0] & 0x0f) == 0)
fixup |= FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x06: /* bctr */
case 0x07: /* bcr */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x45: /* bal */
case 0x4d: /* bas */
fixup = FIXUP_RETURN_REGISTER;
break;
case 0x47: /* bc */
case 0x46: /* bct */
case 0x86: /* bxh */
case 0x87: /* bxle */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x82: /* lpsw */
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xb2: /* lpswe */
if ((insn[0] & 0xff) == 0xb2)
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xa7: /* bras */
if ((insn[0] & 0x0f) == 0x05)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xc0:
if ((insn[0] & 0x0f) == 0x05) /* brasl */
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
switch (insn[2] & 0xff) {
case 0x44: /* bxhg */
case 0x45: /* bxleg */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
case 0xe3: /* bctg */
if ((insn[2] & 0xff) == 0x46)
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0xec:
switch (insn[2] & 0xff) {
case 0xe5: /* clgrb */
case 0xe6: /* cgrb */
case 0xf6: /* crb */
case 0xf7: /* clrb */
case 0xfc: /* cgib */
case 0xfd: /* cglib */
case 0xfe: /* cib */
case 0xff: /* clib */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
}
return fixup;
}
int probe_is_insn_relative_long(u16 *insn)
{
/* Check if we have a RIL-b or RIL-c format instruction which
* we need to modify in order to avoid instruction emulation. */
switch (insn[0] >> 8) {
case 0xc0:
if ((insn[0] & 0x0f) == 0x00) /* larl */
return true;
break;
case 0xc4:
switch (insn[0] & 0x0f) {
case 0x02: /* llhrl */
case 0x04: /* lghrl */
case 0x05: /* lhrl */
case 0x06: /* llghrl */
case 0x07: /* sthrl */
case 0x08: /* lgrl */
case 0x0b: /* stgrl */
case 0x0c: /* lgfrl */
case 0x0d: /* lrl */
case 0x0e: /* llgfrl */
case 0x0f: /* strl */
return true;
}
break;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
case 0x06: /* clghrl */
case 0x07: /* clhrl */
case 0x08: /* cgrl */
case 0x0a: /* clgrl */
case 0x0c: /* cgfrl */
case 0x0d: /* crl */
case 0x0e: /* clgfrl */
case 0x0f: /* clrl */
return true;
}
break;
}
return false;
}
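The three probe_* helpers above all dispatch on the same decoding scheme: the first opcode byte selects an instruction family, and for the c0/c4/c6 RIL families the low nibble of the second byte selects the operation. A stripped-down sketch of just the family dispatch (the real functions go on to test that low nibble, e.g. to separate larl from other c0-family instructions); the harness values are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

/* First-byte family test, mirroring 'insn[0] >> 8' in the helpers above. */
static int is_ril_family(uint16_t insn0)
{
	switch (insn0 >> 8) {
	case 0xc0:	/* larl, brasl, ... */
	case 0xc4:	/* load/store relative long */
	case 0xc6:	/* compare relative long, pfdrl */
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	uint16_t larl = 0xc010;	/* larl %r1,...  -> RIL family */
	uint16_t basr = 0x0d05;	/* basr %r0,%r5  -> not RIL */

	printf("larl: %d, basr: %d\n", is_ril_family(larl),
	       is_ril_family(basr));
	return 0;
}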
...@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) ...@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
} }
EXPORT_SYMBOL(arch_spin_lock_wait_flags); EXPORT_SYMBOL(arch_spin_lock_wait_flags);
void arch_spin_relax(arch_spinlock_t *lp)
{
unsigned int cpu = lp->lock;
if (cpu != 0) {
if (MACHINE_IS_VM || MACHINE_IS_KVM ||
!smp_vcpu_scheduled(~cpu))
smp_yield_cpu(~cpu);
}
}
EXPORT_SYMBOL(arch_spin_relax);
int arch_spin_trylock_retry(arch_spinlock_t *lp) int arch_spin_trylock_retry(arch_spinlock_t *lp)
{ {
int count; int count;
...@@ -122,15 +111,21 @@ EXPORT_SYMBOL(arch_spin_trylock_retry); ...@@ -122,15 +111,21 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
void _raw_read_lock_wait(arch_rwlock_t *rw) void _raw_read_lock_wait(arch_rwlock_t *rw)
{ {
unsigned int old; unsigned int owner, old;
int count = spin_retry; int count = spin_retry;
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
owner = 0;
while (1) { while (1) {
if (count-- <= 0) { if (count-- <= 0) {
smp_yield(); if (owner && !smp_vcpu_scheduled(~owner))
smp_yield_cpu(~owner);
count = spin_retry; count = spin_retry;
} }
old = ACCESS_ONCE(rw->lock); old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
if ((int) old < 0) if ((int) old < 0)
continue; continue;
if (_raw_compare_and_swap(&rw->lock, old, old + 1)) if (_raw_compare_and_swap(&rw->lock, old, old + 1))
...@@ -139,28 +134,6 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) ...@@ -139,28 +134,6 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
} }
EXPORT_SYMBOL(_raw_read_lock_wait); EXPORT_SYMBOL(_raw_read_lock_wait);
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
local_irq_restore(flags);
while (1) {
if (count-- <= 0) {
smp_yield();
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
if ((int) old < 0)
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
local_irq_restore(flags);
}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
int _raw_read_trylock_retry(arch_rwlock_t *rw) int _raw_read_trylock_retry(arch_rwlock_t *rw)
{ {
unsigned int old; unsigned int old;
...@@ -177,46 +150,62 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw) ...@@ -177,46 +150,62 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
} }
EXPORT_SYMBOL(_raw_read_trylock_retry); EXPORT_SYMBOL(_raw_read_trylock_retry);
void _raw_write_lock_wait(arch_rwlock_t *rw) #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{ {
unsigned int old; unsigned int owner, old;
int count = spin_retry; int count = spin_retry;
owner = 0;
while (1) { while (1) {
if (count-- <= 0) { if (count-- <= 0) {
smp_yield(); if (owner && !smp_vcpu_scheduled(~owner))
smp_yield_cpu(~owner);
count = spin_retry; count = spin_retry;
} }
old = ACCESS_ONCE(rw->lock); old = ACCESS_ONCE(rw->lock);
if (old) owner = ACCESS_ONCE(rw->owner);
continue; smp_rmb();
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) if ((int) old >= 0) {
return; prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
old = prev;
}
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
} }
} }
EXPORT_SYMBOL(_raw_write_lock_wait); EXPORT_SYMBOL(_raw_write_lock_wait);
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
void _raw_write_lock_wait(arch_rwlock_t *rw)
{ {
unsigned int old; unsigned int owner, old, prev;
int count = spin_retry; int count = spin_retry;
local_irq_restore(flags); prev = 0x80000000;
owner = 0;
while (1) { while (1) {
if (count-- <= 0) { if (count-- <= 0) {
smp_yield(); if (owner && !smp_vcpu_scheduled(~owner))
smp_yield_cpu(~owner);
count = spin_retry; count = spin_retry;
} }
old = ACCESS_ONCE(rw->lock); old = ACCESS_ONCE(rw->lock);
if (old) owner = ACCESS_ONCE(rw->owner);
continue; if ((int) old >= 0 &&
local_irq_disable(); _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) prev = old;
return; else
local_irq_restore(flags); smp_rmb();
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
} }
} }
EXPORT_SYMBOL(_raw_write_lock_wait_flags); EXPORT_SYMBOL(_raw_write_lock_wait);
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
int _raw_write_trylock_retry(arch_rwlock_t *rw) int _raw_write_trylock_retry(arch_rwlock_t *rw)
{ {
...@@ -233,3 +222,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw) ...@@ -233,3 +222,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
return 0; return 0;
} }
EXPORT_SYMBOL(_raw_write_trylock_retry); EXPORT_SYMBOL(_raw_write_trylock_retry);
void arch_lock_relax(unsigned int cpu)
{
if (!cpu)
return;
if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
return;
smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);
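To make the rewritten write-lock path easier to follow: the lock word packs a reader count into bits 0..30 and the writer bit into bit 31, so "(int) old < 0" means a writer holds or has claimed the lock. Writer fairness comes from claiming bit 31 with an atomic OR first and only then waiting for readers to drain. Below is a toy user-space model of that word layout, using C11 atomics in place of __RAW_LOCK/_raw_compare_and_swap; it illustrates the layout only and is not a usable lock:

#include <stdatomic.h>
#include <stdio.h>

#define WRITER_BIT 0x80000000u

static _Atomic unsigned int lock_word;

static void toy_write_lock(void)
{
	/* claim the writer bit first (the __RAW_OP_OR step) ... */
	unsigned int prev = atomic_fetch_or(&lock_word, WRITER_BIT);

	/* ... then wait for the reader count in bits 0..30 to drain */
	while ((prev & 0x7fffffffu) != 0)
		prev = atomic_load(&lock_word);
}

static int toy_read_trylock(void)
{
	unsigned int old = atomic_load(&lock_word);

	/* a negative lock word means a writer holds or wants the lock */
	if ((int)old < 0)
		return 0;
	return atomic_compare_exchange_strong(&lock_word, &old, old + 1);
}

int main(void)
{
	printf("read before writer: %d\n", toy_read_trylock());
	/* one reader now; a real writer would spin here, so drop it first */
	atomic_fetch_sub(&lock_word, 1);
	toy_write_lock();
	printf("read under writer:  %d\n", toy_read_trylock());
	return 0;
}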
...@@ -54,7 +54,6 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level) ...@@ -54,7 +54,6 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
return; return;
} }
seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW "); seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
seq_putc(m, '\n'); seq_putc(m, '\n');
} }
...@@ -129,7 +128,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, ...@@ -129,7 +128,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
} }
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO) #define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
#else #else
#define _PMD_PROT_MASK 0 #define _PMD_PROT_MASK 0
#endif #endif
...@@ -157,7 +156,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, ...@@ -157,7 +156,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
} }
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO) #define _PUD_PROT_MASK _REGION3_ENTRY_RO
#else #else
#define _PUD_PROT_MASK 0 #define _PUD_PROT_MASK 0
#endif #endif
......
...@@ -88,7 +88,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, ...@@ -88,7 +88,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) |= pte_page(pte)[1].index; pmd_val(pmd) |= pte_page(pte)[1].index;
} else } else
pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO; pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
*(pmd_t *) ptep = pmd; *(pmd_t *) ptep = pmd;
} }
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -103,27 +104,50 @@ int set_memory_x(unsigned long addr, int numpages) ...@@ -103,27 +104,50 @@ int set_memory_x(unsigned long addr, int numpages)
} }
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
int i;
if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
__ptep_ipte_range(address, nr - 1, pte);
return;
}
for (i = 0; i < nr; i++) {
__ptep_ipte(address, pte);
address += PAGE_SIZE;
pte++;
}
}
void kernel_map_pages(struct page *page, int numpages, int enable) void kernel_map_pages(struct page *page, int numpages, int enable)
{ {
unsigned long address; unsigned long address;
int nr, i, j;
pgd_t *pgd; pgd_t *pgd;
pud_t *pud; pud_t *pud;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte; pte_t *pte;
int i;
for (i = 0; i < numpages; i++) { for (i = 0; i < numpages;) {
address = page_to_phys(page + i); address = page_to_phys(page + i);
pgd = pgd_offset_k(address); pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address); pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address); pte = pte_offset_kernel(pmd, address);
if (!enable) { nr = (unsigned long)pte >> ilog2(sizeof(long));
__ptep_ipte(address, pte); nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
pte_val(*pte) = _PAGE_INVALID; nr = min(numpages - i, nr);
continue; if (enable) {
for (j = 0; j < nr; j++) {
pte_val(*pte) = __pa(address);
address += PAGE_SIZE;
pte++;
}
} else {
ipte_range(pte, address, nr);
} }
pte_val(*pte) = __pa(address); i += nr;
} }
} }
......
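The batching arithmetic in kernel_map_pages() deserves a gloss: it converts the pte pointer into an entry index and takes the distance to the end of the current 256-entry page table, so one __ptep_ipte_range() call never crosses a table boundary. A standalone check of that arithmetic; 8-byte entries and PTRS_PER_PTE == 256 are the 64-bit s390 values, assumed here for illustration:

#include <stdio.h>

#define PTRS_PER_PTE 256UL

static unsigned long ptes_to_table_end(unsigned long pte_addr)
{
	unsigned long nr = pte_addr >> 3;	/* ilog2(sizeof(long)) == 3 */

	return PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
}

int main(void)
{
	/* entry 0 of a table: full table remains; last entry: just one */
	printf("%lu %lu\n",
	       ptes_to_table_end(0x1000),
	       ptes_to_table_end(0x1000 + 255 * 8));
	return 0;
}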
...@@ -236,8 +236,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) ...@@ -236,8 +236,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!new_page) if (!new_page)
goto out; goto out;
pmd_val(*pm_dir) = __pa(new_page) | pmd_val(*pm_dir) = __pa(new_page) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
_SEGMENT_ENTRY_CO;
address = (address + PMD_SIZE) & PMD_MASK; address = (address + PMD_SIZE) & PMD_MASK;
continue; continue;
} }
...@@ -253,9 +252,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) ...@@ -253,9 +252,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pt_dir = pte_offset_kernel(pm_dir, address); pt_dir = pte_offset_kernel(pm_dir, address);
if (pte_none(*pt_dir)) { if (pte_none(*pt_dir)) {
unsigned long new_page; void *new_page;
new_page = __pa(vmem_alloc_pages(0)); new_page = vmemmap_alloc_block(PAGE_SIZE, node);
if (!new_page) if (!new_page)
goto out; goto out;
pte_val(*pt_dir) = pte_val(*pt_dir) =
...@@ -263,7 +262,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) ...@@ -263,7 +262,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
} }
address += PAGE_SIZE; address += PAGE_SIZE;
} }
memset((void *)start, 0, end - start);
ret = 0; ret = 0;
out: out:
return ret; return ret;
......
...@@ -1432,6 +1432,29 @@ static ssize_t dasd_reservation_state_store(struct device *dev, ...@@ -1432,6 +1432,29 @@ static ssize_t dasd_reservation_state_store(struct device *dev,
static DEVICE_ATTR(last_known_reservation_state, 0644, static DEVICE_ATTR(last_known_reservation_state, 0644,
dasd_reservation_state_show, dasd_reservation_state_store); dasd_reservation_state_show, dasd_reservation_state_store);
static ssize_t dasd_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
u8 opm, nppm, cablepm, cuirpm, hpfpm;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
opm = device->path_data.opm;
nppm = device->path_data.npm;
cablepm = device->path_data.cablepm;
cuirpm = device->path_data.cuirpm;
hpfpm = device->path_data.hpfpm;
dasd_put_device(device);
return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
cablepm, cuirpm, hpfpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
static struct attribute * dasd_attrs[] = { static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr, &dev_attr_readonly.attr,
&dev_attr_discipline.attr, &dev_attr_discipline.attr,
...@@ -1450,6 +1473,7 @@ static struct attribute * dasd_attrs[] = { ...@@ -1450,6 +1473,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_reservation_policy.attr, &dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr, &dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr, &dev_attr_safe_offline.attr,
&dev_attr_path_masks.attr,
NULL, NULL,
}; };
......
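The new path_masks attribute prints five space-separated hex masks in the order opm, nppm, cablepm, cuirpm, hpfpm. A short sketch of consuming that output from user space; the sample string is made up, and reading it from /sys/bus/ccw/devices/<busid>/path_masks is an assumption about the usual dasd sysfs layout, not something stated in the patch:

#include <stdio.h>

int main(void)
{
	/* sample output shaped like the sprintf() above; values made up */
	const char *buf = "c0 00 40 00 00\n";
	unsigned int opm, nppm, cablepm, cuirpm, hpfpm;

	if (sscanf(buf, "%x %x %x %x %x",
		   &opm, &nppm, &cablepm, &cuirpm, &hpfpm) == 5)
		printf("opm=%02x cuirpm=%02x\n", opm, cuirpm);
	return 0;
}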
...@@ -33,3 +33,6 @@ obj-$(CONFIG_S390_VMUR) += vmur.o ...@@ -33,3 +33,6 @@ obj-$(CONFIG_S390_VMUR) += vmur.o
zcore_mod-objs := sclp_sdias.o zcore.o zcore_mod-objs := sclp_sdias.o zcore.o
obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o
hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
obj-$(CONFIG_HMC_DRV) += hmcdrv.o