commit 043d0708    Author: Martin Schwidefsky

[S390] Remove data execution protection

The noexec support on s390 does not rely on a bit in the page table
entry but utilizes the secondary space mode to distinguish between
memory accesses for instructions vs. data. The noexec code relies
on the assumption that the cpu will always use the secondary space
page table for data accesses while it is running in the secondary
space mode. Up to the z9-109 class machines this has been the case.
Unfortunately this is not true anymore with z10 and later machines.
The load-relative-long instructions lrl, lgrl and lgfrl access the
memory operand using the same addressing-space mode that has been
used to fetch the instruction.
This breaks the noexec mode for all user space binaries compiled
with -march=z10 or later. The only option is to remove the current
noexec support.
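
For illustration only (this snippet is not part of the patch, and the exact
code generation depends on the compiler): an ordinary access to static data
is the kind of construct that gcc may compile to a load-relative-long when
building with -march=z10, which then bypasses the secondary-space data page
table the old noexec scheme relied on.

    /*
     * Sketch, assuming gcc emits "lrl %r2,counter" for the pc-relative
     * load. The operand of lrl is fetched in the same address space as
     * the instruction itself, so the secondary-space (data) page table
     * that implements noexec is never consulted.
     */
    static int counter;

    int read_counter(void)
    {
            return counter;
    }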
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 9bf05098
@@ -230,17 +230,6 @@ config SYSVIPC_COMPAT
 config AUDIT_ARCH
         def_bool y
 
-config S390_EXEC_PROTECT
-        def_bool y
-        prompt "Data execute protection"
-        help
-          This option allows to enable a buffer overflow protection for user
-          space programs and it also selects the addressing mode option above.
-          The kernel parameter noexec=on will enable this feature and also
-          switch the addressing modes, default is disabled. Enabling this (via
-          kernel parameter) on machines earlier than IBM System z9 this will
-          reduce system performance.
-
 comment "Code generation options"
 
 choice

...
@@ -196,18 +196,6 @@ do { \
 } while (0)
 #endif /* __s390x__ */
 
-/*
- * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
- */
-#define elf_read_implies_exec(ex, executable_stack) \
-({ \
-        if (current->mm->context.noexec && \
-            executable_stack != EXSTACK_DISABLE_X) \
-                disable_noexec(current->mm, current); \
-        current->mm->context.noexec == 0; \
-})
-
 #define STACK_RND_MASK 0x7ffUL
 
 #define ARCH_DLINFO \

...
@@ -111,21 +111,10 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 {
         pmd_t *pmdp = (pmd_t *) ptep;
 
-        if (!MACHINE_HAS_IDTE) {
-                __pmd_csp(pmdp);
-                if (mm->context.noexec) {
-                        pmdp = get_shadow_table(pmdp);
-                        __pmd_csp(pmdp);
-                }
-                return;
-        }
-
-        __pmd_idte(address, pmdp);
-        if (mm->context.noexec) {
-                pmdp = get_shadow_table(pmdp);
+        if (MACHINE_HAS_IDTE)
                 __pmd_idte(address, pmdp);
-        }
-        return;
+        else
+                __pmd_csp(pmdp);
 }
 
 #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \

...
@@ -124,7 +124,7 @@ struct _lowcore {
         /* Address space pointer. */
         __u32 kernel_asce;                      /* 0x02ac */
         __u32 user_asce;                        /* 0x02b0 */
-        __u32 user_exec_asce;                   /* 0x02b4 */
+        __u8  pad_0x02b4[0x02b8-0x02b4];        /* 0x02b4 */
 
         /* SMP info area */
         __u32 cpu_nr;                           /* 0x02b8 */

@@ -255,7 +255,7 @@ struct _lowcore {
         /* Address space pointer. */
         __u64 kernel_asce;                      /* 0x0310 */
         __u64 user_asce;                        /* 0x0318 */
-        __u64 user_exec_asce;                   /* 0x0320 */
+        __u8  pad_0x0320[0x0328-0x0320];        /* 0x0320 */
 
         /* SMP info area */
         __u32 cpu_nr;                           /* 0x0328 */

...
@@ -5,19 +5,16 @@ typedef struct {
         atomic_t attach_count;
         unsigned int flush_mm;
         spinlock_t list_lock;
-        struct list_head crst_list;
         struct list_head pgtable_list;
         unsigned long asce_bits;
         unsigned long asce_limit;
         unsigned long vdso_base;
-        int noexec;
         int has_pgste;   /* The mmu context has extended page tables */
         int alloc_pgste; /* cloned contexts will have extended page tables */
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name) \
         .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
-        .context.crst_list = LIST_HEAD_INIT(name.context.crst_list), \
         .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
 
 #endif
@@ -35,11 +35,9 @@ static inline int init_new_context(struct task_struct *tsk,
                  * and if has_pgste is set, it will create extended page
                  * tables.
                  */
-                mm->context.noexec = 0;
                 mm->context.has_pgste = 1;
                 mm->context.alloc_pgste = 1;
         } else {
-                mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
                 mm->context.has_pgste = 0;
                 mm->context.alloc_pgste = 0;
         }

@@ -63,10 +61,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
         S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
         if (user_mode != HOME_SPACE_MODE) {
                 /* Load primary space page table origin. */
-                pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
-                S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
                 asm volatile(LCTL_OPCODE" 1,1,%0\n"
-                             : : "m" (S390_lowcore.user_exec_asce) );
+                             : : "m" (S390_lowcore.user_asce) );
         } else
                 /* Load home space page table origin. */
                 asm volatile(LCTL_OPCODE" 13,13,%0"

...
@@ -19,14 +19,13 @@
 
 #define check_pgt_cache() do {} while (0)
 
-unsigned long *crst_table_alloc(struct mm_struct *, int);
+unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mm_struct *, unsigned long *);
-void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {

@@ -50,9 +49,6 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
         clear_table(crst, entry, sizeof(unsigned long)*2048);
-        crst = get_shadow_table(crst);
-        if (crst)
-                clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__

@@ -90,7 +86,7 @@ void crst_table_downgrade(struct mm_struct *, unsigned long limit);
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+        unsigned long *table = crst_table_alloc(mm);
         if (table)
                 crst_table_init(table, _REGION3_ENTRY_EMPTY);
         return (pud_t *) table;

@@ -99,7 +95,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-        unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+        unsigned long *table = crst_table_alloc(mm);
         if (table)
                 crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
         return (pmd_t *) table;

@@ -115,11 +111,6 @@ static inline void pgd_populate_kernel(struct mm_struct *mm,
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
         pgd_populate_kernel(mm, pgd, pud);
-        if (mm->context.noexec) {
-                pgd = get_shadow_table(pgd);
-                pud = get_shadow_table(pud);
-                pgd_populate_kernel(mm, pgd, pud);
-        }
 }
 
 static inline void pud_populate_kernel(struct mm_struct *mm,

@@ -131,11 +122,6 @@ static inline void pud_populate_kernel(struct mm_struct *mm,
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
         pud_populate_kernel(mm, pud, pmd);
-        if (mm->context.noexec) {
-                pud = get_shadow_table(pud);
-                pmd = get_shadow_table(pmd);
-                pud_populate_kernel(mm, pud, pmd);
-        }
 }
 
 #endif /* __s390x__ */

@@ -143,10 +129,8 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
         spin_lock_init(&mm->context.list_lock);
-        INIT_LIST_HEAD(&mm->context.crst_list);
         INIT_LIST_HEAD(&mm->context.pgtable_list);
-        return (pgd_t *)
-                crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
+        return (pgd_t *) crst_table_alloc(mm);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)

@@ -160,10 +144,6 @@ static inline void pmd_populate(struct mm_struct *mm,
                                 pmd_t *pmd, pgtable_t pte)
 {
         pmd_populate_kernel(mm, pmd, pte);
-        if (mm->context.noexec) {
-                pmd = get_shadow_table(pmd);
-                pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
-        }
 }
 
 #define pmd_pgtable(pmd) \

...
@@ -256,8 +256,6 @@ extern unsigned long VMALLOC_START;
 #define _PAGE_TYPE_FILE  0x601 /* bit 0x002 is used for offset !! */
 #define _PAGE_TYPE_RO    0x200
 #define _PAGE_TYPE_RW    0x000
-#define _PAGE_TYPE_EX_RO 0x202
-#define _PAGE_TYPE_EX_RW 0x002
 
 /*
  * Only four types for huge pages, using the invalid bit and protection bit

@@ -287,8 +285,6 @@ extern unsigned long VMALLOC_START;
  * _PAGE_TYPE_FILE  11?1   ->  11?1
  * _PAGE_TYPE_RO    0100   ->  1100
  * _PAGE_TYPE_RW    0000   ->  1000
- * _PAGE_TYPE_EX_RO 0110   ->  1110
- * _PAGE_TYPE_EX_RW 0010   ->  1010
  *
  * pte_none is true for bits combinations 1000, 1010, 1100, 1110
  * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001

@@ -387,55 +383,33 @@ extern unsigned long VMALLOC_START;
 #define PAGE_NONE   __pgprot(_PAGE_TYPE_NONE)
 #define PAGE_RO     __pgprot(_PAGE_TYPE_RO)
 #define PAGE_RW     __pgprot(_PAGE_TYPE_RW)
-#define PAGE_EX_RO  __pgprot(_PAGE_TYPE_EX_RO)
-#define PAGE_EX_RW  __pgprot(_PAGE_TYPE_EX_RW)
 
 #define PAGE_KERNEL PAGE_RW
 #define PAGE_COPY   PAGE_RO
 
 /*
- * Dependent on the EXEC_PROTECT option s390 can do execute protection.
- * Write permission always implies read permission. In theory with a
- * primary/secondary page table execute only can be implemented but
- * it would cost an additional bit in the pte to distinguish all the
- * different pte types. To avoid that execute permission currently
- * implies read permission as well.
+ * On s390 the page table entry has an invalid bit and a read-only bit.
+ * Read permission implies execute permission and write permission
+ * implies read permission.
  */
          /*xwr*/
 #define __P000 PAGE_NONE
 #define __P001 PAGE_RO
 #define __P010 PAGE_RO
 #define __P011 PAGE_RO
-#define __P100 PAGE_EX_RO
-#define __P101 PAGE_EX_RO
-#define __P110 PAGE_EX_RO
-#define __P111 PAGE_EX_RO
+#define __P100 PAGE_RO
+#define __P101 PAGE_RO
+#define __P110 PAGE_RO
+#define __P111 PAGE_RO
 
 #define __S000 PAGE_NONE
 #define __S001 PAGE_RO
 #define __S010 PAGE_RW
 #define __S011 PAGE_RW
-#define __S100 PAGE_EX_RO
-#define __S101 PAGE_EX_RO
-#define __S110 PAGE_EX_RW
-#define __S111 PAGE_EX_RW
-
-#ifndef __s390x__
-# define PxD_SHADOW_SHIFT 1
-#else /* __s390x__ */
-# define PxD_SHADOW_SHIFT 2
-#endif /* __s390x__ */
-
-static inline void *get_shadow_table(void *table)
-{
-        unsigned long addr, offset;
-        struct page *page;
-
-        addr = (unsigned long) table;
-        offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
-        page = virt_to_page((void *)(addr ^ offset));
-        return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
-}
+#define __S100 PAGE_RO
+#define __S101 PAGE_RO
+#define __S110 PAGE_RW
+#define __S111 PAGE_RW
 
 /*
  * Certain architectures need to do special things when PTEs

@@ -446,14 +420,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep, pte_t entry)
 {
         *ptep = entry;
-        if (mm->context.noexec) {
-                if (!(pte_val(entry) & _PAGE_INVALID) &&
-                    (pte_val(entry) & _PAGE_SWX))
-                        pte_val(entry) |= _PAGE_RO;
-                else
-                        pte_val(entry) = _PAGE_TYPE_EMPTY;
-                ptep[PTRS_PER_PTE] = entry;
-        }
 }
 
 /*

@@ -662,11 +628,7 @@ static inline void pgd_clear_kernel(pgd_t * pgd)
 
 static inline void pgd_clear(pgd_t * pgd)
 {
-        pgd_t *shadow = get_shadow_table(pgd);
-
         pgd_clear_kernel(pgd);
-        if (shadow)
-                pgd_clear_kernel(shadow);
 }
 
 static inline void pud_clear_kernel(pud_t *pud)

@@ -677,13 +639,8 @@ static inline void pud_clear_kernel(pud_t *pud)
 
 static inline void pud_clear(pud_t *pud)
 {
-        pud_t *shadow = get_shadow_table(pud);
-
         pud_clear_kernel(pud);
-        if (shadow)
-                pud_clear_kernel(shadow);
 }
 
 #endif /* __s390x__ */
 
 static inline void pmd_clear_kernel(pmd_t * pmdp)

@@ -693,18 +650,12 @@ static inline void pmd_clear_kernel(pmd_t * pmdp)
 
 static inline void pmd_clear(pmd_t *pmd)
 {
-        pmd_t *shadow = get_shadow_table(pmd);
-
         pmd_clear_kernel(pmd);
-        if (shadow)
-                pmd_clear_kernel(shadow);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-        if (mm->context.noexec)
-                pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
 }
 
 /*

@@ -903,10 +854,6 @@ static inline void ptep_invalidate(struct mm_struct *mm,
         }
         __ptep_ipte(address, ptep);
         pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-        if (mm->context.noexec) {
-                __ptep_ipte(address, ptep + PTRS_PER_PTE);
-                pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
-        }
 }
 
 /*

...
@@ -80,16 +80,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
          * on all cpus instead of doing a local flush if the mm
          * only ran on the local cpu.
          */
-        if (MACHINE_HAS_IDTE) {
-                if (mm->context.noexec)
-                        __tlb_flush_idte((unsigned long)
-                                         get_shadow_table(mm->pgd) |
-                                         mm->context.asce_bits);
+        if (MACHINE_HAS_IDTE)
                 __tlb_flush_idte((unsigned long) mm->pgd |
                                  mm->context.asce_bits);
-                return;
-        }
-        __tlb_flush_full(mm);
+        else
+                __tlb_flush_full(mm);
 }
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)

...
@@ -128,9 +128,6 @@ int main(void)
         DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
         DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
         DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
-        DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
-        DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
-        DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
         DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
         DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
         DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));

...
@@ -305,8 +305,7 @@ static int set_amode_and_uaccess(unsigned long user_amode,
  */
 static int __init early_parse_switch_amode(char *p)
 {
-        if (user_mode != SECONDARY_SPACE_MODE)
-                user_mode = PRIMARY_SPACE_MODE;
+        user_mode = PRIMARY_SPACE_MODE;
         return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);

@@ -315,10 +314,6 @@ static int __init early_parse_user_mode(char *p)
 {
         if (p && strcmp(p, "primary") == 0)
                 user_mode = PRIMARY_SPACE_MODE;
-#ifdef CONFIG_S390_EXEC_PROTECT
-        else if (p && strcmp(p, "secondary") == 0)
-                user_mode = SECONDARY_SPACE_MODE;
-#endif
         else if (!p || strcmp(p, "home") == 0)
                 user_mode = HOME_SPACE_MODE;
         else

@@ -327,31 +322,9 @@ static int __init early_parse_user_mode(char *p)
 }
 early_param("user_mode", early_parse_user_mode);
 
-#ifdef CONFIG_S390_EXEC_PROTECT
-/*
- * Enable execute protection?
- */
-static int __init early_parse_noexec(char *p)
-{
-        if (!strncmp(p, "off", 3))
-                return 0;
-        user_mode = SECONDARY_SPACE_MODE;
-        return 0;
-}
-early_param("noexec", early_parse_noexec);
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
 static void setup_addressing_mode(void)
 {
-        if (user_mode == SECONDARY_SPACE_MODE) {
-                if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
-                                          PSW32_ASC_SECONDARY))
-                        pr_info("Execute protection active, "
-                                "mvcos available\n");
-                else
-                        pr_info("Execute protection active, "
-                                "mvcos not available\n");
-        } else if (user_mode == PRIMARY_SPACE_MODE) {
+        if (user_mode == PRIMARY_SPACE_MODE) {
                 if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
                         pr_info("Address spaces switched, "
                                 "mvcos available\n");

...
@@ -225,33 +225,6 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
         force_sig_info(SIGBUS, &si, tsk);
 }
 
-#ifdef CONFIG_S390_EXEC_PROTECT
-static noinline int signal_return(struct pt_regs *regs, long int_code,
-                                  unsigned long trans_exc_code)
-{
-        u16 instruction;
-        int rc;
-
-        rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-
-        if (!rc && instruction == 0x0a77) {
-                clear_tsk_thread_flag(current, TIF_PER_TRAP);
-                if (is_compat_task())
-                        sys32_sigreturn();
-                else
-                        sys_sigreturn();
-        } else if (!rc && instruction == 0x0aad) {
-                clear_tsk_thread_flag(current, TIF_PER_TRAP);
-                if (is_compat_task())
-                        sys32_rt_sigreturn();
-                else
-                        sys_rt_sigreturn();
-        } else
-                do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
-        return 0;
-}
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
 static noinline void do_fault_error(struct pt_regs *regs, long int_code,
                                     unsigned long trans_exc_code, int fault)
 {

@@ -259,13 +232,6 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
         switch (fault) {
         case VM_FAULT_BADACCESS:
-#ifdef CONFIG_S390_EXEC_PROTECT
-                if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-                    (trans_exc_code & 3) == 0) {
-                        signal_return(regs, int_code, trans_exc_code);
-                        break;
-                }
-#endif /* CONFIG_S390_EXEC_PROTECT */
         case VM_FAULT_BADMAP:
                 /* Bad memory access. Check if it is kernel or user space. */
                 if (regs->psw.mask & PSW_MASK_PSTATE) {

@@ -414,11 +380,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
         int access, fault;
 
         access = VM_READ | VM_EXEC | VM_WRITE;
-#ifdef CONFIG_S390_EXEC_PROTECT
-        if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-            (trans_exc_code & 3) == 0)
-                access = VM_EXEC;
-#endif
         fault = do_exception(regs, access, trans_exc_code);
         if (unlikely(fault))
                 do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);

...
@@ -13,7 +13,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *pteptr, pte_t pteval)
 {
         pmd_t *pmdp = (pmd_t *) pteptr;
-        pte_t shadow_pteval = pteval;
         unsigned long mask;
 
         if (!MACHINE_HAS_HPAGE) {

@@ -21,18 +20,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                 mask = pte_val(pteval) &
                         (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
                 pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-                if (mm->context.noexec) {
-                        pteptr += PTRS_PER_PTE;
-                        pte_val(shadow_pteval) =
-                                (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-                }
         }
 
         pmd_val(*pmdp) = pte_val(pteval);
-        if (mm->context.noexec) {
-                pmdp = get_shadow_table(pmdp);
-                pmd_val(*pmdp) = pte_val(shadow_pteval);
-        }
 }
 
 int arch_prepare_hugepage(struct page *page)

...
@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
 
 static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
 
 static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
 {

@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
         while (batch->pgt_index > 0)
                 __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
         while (batch->crst_index < RCU_FREELIST_SIZE)
-                __crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+                crst_table_free(batch->mm, batch->table[batch->crst_index++]);
         free_page((unsigned long) batch);
 }

@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
         struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 
         if (!page)
                 return NULL;
-        page->index = 0;
-        if (noexec) {
-                struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
-                if (!shadow) {
-                        __free_pages(page, ALLOC_ORDER);
-                        return NULL;
-                }
-                page->index = page_to_phys(shadow);
-        }
-        spin_lock_bh(&mm->context.list_lock);
-        list_add(&page->lru, &mm->context.crst_list);
-        spin_unlock_bh(&mm->context.list_lock);
         return (unsigned long *) page_to_phys(page);
 }
 
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
-        unsigned long *shadow = get_shadow_table(table);
-
-        if (shadow)
-                free_pages((unsigned long) shadow, ALLOC_ORDER);
-        free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
-        struct page *page = virt_to_page(table);
-
-        spin_lock_bh(&mm->context.list_lock);
-        list_del(&page->lru);
-        spin_unlock_bh(&mm->context.list_lock);
-        __crst_table_free(mm, table);
+        free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
 void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
         struct rcu_table_freelist *batch;
-        struct page *page = virt_to_page(table);
 
-        spin_lock_bh(&mm->context.list_lock);
-        list_del(&page->lru);
-        spin_unlock_bh(&mm->context.list_lock);
         if (atomic_read(&mm->mm_users) < 2 &&
             cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-                __crst_table_free(mm, table);
+                crst_table_free(mm, table);
                 return;
         }
         batch = rcu_table_freelist_get(mm);
         if (!batch) {
                 smp_call_function(smp_sync, NULL, 1);
-                __crst_table_free(mm, table);
+                crst_table_free(mm, table);
                 return;
         }
         batch->table[--batch->crst_index] = table;

@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 
         BUG_ON(limit > (1UL << 53));
 repeat:
-        table = crst_table_alloc(mm, mm->context.noexec);
+        table = crst_table_alloc(mm);
         if (!table)
                 return -ENOMEM;
         spin_lock_bh(&mm->page_table_lock);

@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
         unsigned long *table;
         unsigned long bits;
 
-        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+        bits = (mm->context.has_pgste) ? 3UL : 1UL;
         spin_lock_bh(&mm->context.list_lock);
         page = NULL;
         if (!list_empty(&mm->context.pgtable_list)) {

@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
         struct page *page;
         unsigned long bits;
 
-        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+        bits = (mm->context.has_pgste) ? 3UL : 1UL;
         bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
         page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
         spin_lock_bh(&mm->context.list_lock);

@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
                 page_table_free(mm, table);
                 return;
         }
-        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+        bits = (mm->context.has_pgste) ? 3UL : 1UL;
         bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
         page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
         spin_lock_bh(&mm->context.list_lock);

@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
         rcu_table_freelist_finish();
 }
 
-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
-{
-        struct page *page;
-
-        spin_lock_bh(&mm->context.list_lock);
-        /* Free shadow region and segment tables. */
-        list_for_each_entry(page, &mm->context.crst_list, lru)
-                if (page->index) {
-                        free_pages((unsigned long) page->index, ALLOC_ORDER);
-                        page->index = 0;
-                }
-        /* "Free" second halves of page tables. */
-        list_for_each_entry(page, &mm->context.pgtable_list, lru)
-                page->flags &= ~SECOND_HALVES;
-        spin_unlock_bh(&mm->context.list_lock);
-        mm->context.noexec = 0;
-        update_mm(mm, tsk);
-}
-
 /*
  * switch on pgstes for its userspace process (for kvm)
  */

...