提交 17269668 编写于 作者: Y Yang Yingliang

ilp32: revert ilp32 support

hulk inclusion
category: feature
bugzilla: NA
CVE: NA
---------------------------

This feature may conflict with the pointer authentication (PAC) feature
from the ARMv8.3 extensions, so revert it temporarily.
Reviewed-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 b9bf6182
ILP32 AARCH64 SYSCALL ABI
=========================
This document describes the ILP32 syscall ABI and where it differs
from the generic compat linux syscall interface.
ILP32 is an acronym for the memory model in which "Integers, Longs and
Pointers are 32-bit". The main purpose of ILP32 in the Linux kernel is to
provide compatibility with 32-bit legacy code. ILP32 binaries also perform
better in some tests. ARM's application note AN490 covers ILP32 details for
the ARM64 platform:
http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0490a/ar01s01.html
AARCH64/ILP32 userspace may pass garbage in the top half of the w0-w7
registers (syscall arguments), so the top 32 bits are zeroed for them.
Compared to AARCH32, AARCH64/ILP32 uses 64-bit versions of the following types:
ino_t is u64 type.
off_t is s64 type.
blkcnt_t is s64 type.
fsblkcnt_t is u64 type.
fsfilcnt_t is u64 type.
rlim_t is u64 type.
AARCH64/ILP32 ABI uses standard syscall table which can be found at
include/uapi/asm-generic/unistd.h, with the exceptions listed below.
Syscalls which pass 64-bit values are handled by the code shared from
AARCH32 and pass that value as a pair. Following syscalls are affected:
fadvise64_64()
fallocate()
ftruncate64()
pread64()
pwrite64()
readahead()
sync_file_range()
truncate64()
ptrace() syscall is handled by compat version.
The shmat() syscall is handled by the non-compat handler, as AARCH64/ILP32
has no 4-page alignment limitation for shared memory.
statfs() and fstatfs() take the size of struct statfs as an argument.
It is calculated differently in kernel and user space, so the AARCH32
handlers are used to handle it.
struct rt_sigframe is redefined and contains struct compat_siginfo,
as compat syscalls expect, and struct ilp32_ucontext, to handle
AARCH64 register set and 32-bit userspace register representation.
elf_gregset_t is taken from lp64 to handle registers properly.
...@@ -276,21 +276,6 @@ config ARCH_THREAD_STACK_ALLOCATOR ...@@ -276,21 +276,6 @@ config ARCH_THREAD_STACK_ALLOCATOR
config ARCH_WANTS_DYNAMIC_TASK_STRUCT config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool bool
config ARCH_32BIT_OFF_T
bool
depends on !64BIT
help
All new 32-bit architectures should have 64-bit off_t type on
userspace side which corresponds to the loff_t kernel type. This
is the requirement for modern ABIs. Some existing architectures
already have 32-bit off_t. This option is enabled for all such
architectures explicitly. Namely: arc, arm, blackfin, cris, frv,
h8300, hexagon, m32r, m68k, metag, microblaze, mips32, mn10300,
nios2, openrisc, parisc32, powerpc32, score, sh, sparc, tile32,
unicore32, x86_32 and xtensa. This is the complete list. Any
new 32-bit architecture should declare 64-bit off_t type on user
side and so should not enable this option.
config HAVE_REGS_AND_STACK_ACCESS_API config HAVE_REGS_AND_STACK_ACCESS_API
bool bool
help help
......
...@@ -14,7 +14,6 @@ config ARC ...@@ -14,7 +14,6 @@ config ARC
select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SG_CHAIN select ARCH_HAS_SG_CHAIN
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
select ARCH_32BIT_OFF_T
select BUILDTIME_EXTABLE_SORT select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS select CLONE_BACKWARDS
select COMMON_CLK select COMMON_CLK
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#define _UAPI_ASM_ARC_UNISTD_H #define _UAPI_ASM_ARC_UNISTD_H
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_VFORK
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
config ARM config ARM
bool bool
default y default y
select ARCH_32BIT_OFF_T
select ARCH_CLOCKSOURCE_DATA select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC
select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_VIRTUAL if MMU
......
...@@ -447,7 +447,7 @@ config ARM64_ERRATUM_834220 ...@@ -447,7 +447,7 @@ config ARM64_ERRATUM_834220
config ARM64_ERRATUM_845719 config ARM64_ERRATUM_845719
bool "Cortex-A53: 845719: a load might read incorrect data" bool "Cortex-A53: 845719: a load might read incorrect data"
depends on AARCH32_EL0 depends on COMPAT
default y default y
help help
This option adds an alternative code sequence to work around ARM This option adds an alternative code sequence to work around ARM
...@@ -1042,7 +1042,7 @@ config ARM64_SSBD ...@@ -1042,7 +1042,7 @@ config ARM64_SSBD
menuconfig ARMV8_DEPRECATED menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions" bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on AARCH32_EL0 depends on COMPAT
depends on SYSCTL depends on SYSCTL
help help
Legacy software support may require certain instructions Legacy software support may require certain instructions
...@@ -1493,13 +1493,9 @@ config DMI ...@@ -1493,13 +1493,9 @@ config DMI
endmenu endmenu
config COMPAT config COMPAT
def_bool y
depends on AARCH32_EL0 || ARM64_ILP32
config AARCH32_EL0
bool "Kernel support for 32-bit EL0" bool "Kernel support for 32-bit EL0"
def_bool y
depends on ARM64_4K_PAGES || EXPERT depends on ARM64_4K_PAGES || EXPERT
select COMPAT_BINFMT_ELF if BINFMT_ELF
select HAVE_UID16 select HAVE_UID16
select OLD_SIGSUSPEND3 select OLD_SIGSUSPEND3
select COMPAT_OLD_SIGACTION select COMPAT_OLD_SIGACTION
...@@ -1515,13 +1511,6 @@ config AARCH32_EL0 ...@@ -1515,13 +1511,6 @@ config AARCH32_EL0
If you want to execute 32-bit userspace applications, say Y. If you want to execute 32-bit userspace applications, say Y.
config ARM64_ILP32
bool "Kernel support for ILP32"
help
This option enables support for AArch64 ILP32 user space. ILP32
is an ABI where long and pointers are 32bits but it uses the AARCH64
instruction set.
config SYSVIPC_COMPAT config SYSVIPC_COMPAT
def_bool y def_bool y
depends on COMPAT && SYSVIPC depends on COMPAT && SYSVIPC
......
...@@ -158,9 +158,6 @@ ifeq ($(KBUILD_EXTMOD),) ...@@ -158,9 +158,6 @@ ifeq ($(KBUILD_EXTMOD),)
prepare: vdso_prepare prepare: vdso_prepare
vdso_prepare: prepare0 vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
ifeq ($(CONFIG_ARM64_ILP32), y)
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso-ilp32 include/generated/vdso-ilp32-offsets.h
endif
endif endif
define archhelp define archhelp
......
...@@ -484,8 +484,6 @@ CONFIG_EFI_STUB=y ...@@ -484,8 +484,6 @@ CONFIG_EFI_STUB=y
CONFIG_EFI=y CONFIG_EFI=y
CONFIG_DMI=y CONFIG_DMI=y
CONFIG_COMPAT=y CONFIG_COMPAT=y
CONFIG_AARCH32_EL0=y
CONFIG_ARM64_ILP32=y
CONFIG_SYSVIPC_COMPAT=y CONFIG_SYSVIPC_COMPAT=y
# #
......
...@@ -490,8 +490,6 @@ CONFIG_EFI_STUB=y ...@@ -490,8 +490,6 @@ CONFIG_EFI_STUB=y
CONFIG_EFI=y CONFIG_EFI=y
CONFIG_DMI=y CONFIG_DMI=y
CONFIG_COMPAT=y CONFIG_COMPAT=y
CONFIG_AARCH32_EL0=y
CONFIG_ARM64_ILP32=y
CONFIG_SYSVIPC_COMPAT=y CONFIG_SYSVIPC_COMPAT=y
# #
......
...@@ -487,8 +487,6 @@ CONFIG_EFI_STUB=y ...@@ -487,8 +487,6 @@ CONFIG_EFI_STUB=y
CONFIG_EFI=y CONFIG_EFI=y
CONFIG_DMI=y CONFIG_DMI=y
CONFIG_COMPAT=y CONFIG_COMPAT=y
CONFIG_AARCH32_EL0=y
# CONFIG_ARM64_ILP32 is not set
CONFIG_SYSVIPC_COMPAT=y CONFIG_SYSVIPC_COMPAT=y
# #
......
...@@ -467,8 +467,6 @@ CONFIG_EFI_STUB=y ...@@ -467,8 +467,6 @@ CONFIG_EFI_STUB=y
CONFIG_EFI=y CONFIG_EFI=y
CONFIG_DMI=y CONFIG_DMI=y
CONFIG_COMPAT=y CONFIG_COMPAT=y
CONFIG_AARCH32_EL0=y
# CONFIG_ARM64_ILP32 is not set
CONFIG_SYSVIPC_COMPAT=y CONFIG_SYSVIPC_COMPAT=y
# #
......
...@@ -481,8 +481,6 @@ CONFIG_EFI_STUB=y ...@@ -481,8 +481,6 @@ CONFIG_EFI_STUB=y
CONFIG_EFI=y CONFIG_EFI=y
CONFIG_DMI=y CONFIG_DMI=y
CONFIG_COMPAT=y CONFIG_COMPAT=y
CONFIG_AARCH32_EL0=y
CONFIG_ARM64_ILP32=y
CONFIG_SYSVIPC_COMPAT=y CONFIG_SYSVIPC_COMPAT=y
# #
......
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/task_stack.h> #include <linux/sched/task_stack.h>
#include <asm/is_compat.h>
#define COMPAT_USER_HZ 100 #define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__ #ifdef __AARCH64EB__
#define COMPAT_UTS_MACHINE "armv8b\0\0" #define COMPAT_UTS_MACHINE "armv8b\0\0"
...@@ -226,6 +224,23 @@ struct compat_shmid64_ds { ...@@ -226,6 +224,23 @@ struct compat_shmid64_ds {
compat_ulong_t __unused5; compat_ulong_t __unused5;
}; };
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
}
static inline int is_compat_thread(struct thread_info *thread)
{
return test_ti_thread_flag(thread, TIF_32BIT);
}
#else /* !CONFIG_COMPAT */
static inline int is_compat_thread(struct thread_info *thread)
{
return 0;
}
#endif /* CONFIG_COMPAT */ #endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __ASM_COMPAT_H */ #endif /* __ASM_COMPAT_H */
...@@ -16,10 +16,6 @@ ...@@ -16,10 +16,6 @@
#ifndef __ASM_ELF_H #ifndef __ASM_ELF_H
#define __ASM_ELF_H #define __ASM_ELF_H
#ifndef __ASSEMBLY__
#include <linux/compat.h>
#endif
#include <asm/hwcap.h> #include <asm/hwcap.h>
/* /*
...@@ -146,7 +142,6 @@ typedef struct user_fpsimd_state elf_fpregset_t; ...@@ -146,7 +142,6 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define SET_PERSONALITY(ex) \ #define SET_PERSONALITY(ex) \
({ \ ({ \
clear_thread_flag(TIF_32BIT_AARCH64); \
clear_thread_flag(TIF_32BIT); \ clear_thread_flag(TIF_32BIT); \
current->personality &= ~READ_IMPLIES_EXEC; \ current->personality &= ~READ_IMPLIES_EXEC; \
}) })
...@@ -174,9 +169,13 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, ...@@ -174,9 +169,13 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp); int uses_interp);
/* 1GB of VA */ /* 1GB of VA */
#define STACK_RND_MASK (is_compat_task() ? \ #ifdef CONFIG_COMPAT
#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
0x7ff >> (PAGE_SHIFT - 12) : \ 0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12)) 0x3ffff >> (PAGE_SHIFT - 12))
#else
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
#ifdef __AARCH64EB__ #ifdef __AARCH64EB__
#define COMPAT_ELF_PLATFORM ("v8b") #define COMPAT_ELF_PLATFORM ("v8b")
...@@ -188,16 +187,35 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, ...@@ -188,16 +187,35 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL #define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
#endif /*CONFIG_COMPAT */
#ifdef CONFIG_AARCH32_EL0
/* AArch32 registers. */ /* AArch32 registers. */
#define COMPAT_ELF_NGREG 18 #define COMPAT_ELF_NGREG 18
typedef unsigned int compat_elf_greg_t; typedef unsigned int compat_elf_greg_t;
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
/* AArch32 EABI. */
#define EF_ARM_EABI_MASK 0xff000000
#define compat_elf_check_arch(x) (system_supports_32bit_el0() && \
((x)->e_machine == EM_ARM) && \
((x)->e_flags & EF_ARM_EABI_MASK))
#define compat_start_thread compat_start_thread
/*
* Unlike the native SET_PERSONALITY macro, the compat version maintains
* READ_IMPLIES_EXEC across an execve() since this is the behaviour on
* arch/arm/.
*/
#define COMPAT_SET_PERSONALITY(ex) \
({ \
set_thread_flag(TIF_32BIT); \
})
#define COMPAT_ARCH_DLINFO
extern int aarch32_setup_vectors_page(struct linux_binprm *bprm, extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
int uses_interp); int uses_interp);
#endif /* CONFIG_AARCH32_EL0 */ #define compat_arch_setup_additional_pages \
aarch32_setup_vectors_page
#endif /* CONFIG_COMPAT */
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#if defined(__KERNEL__) && defined(CONFIG_AARCH32_EL0) #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */ /* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f #define VFP_FPSCR_STAT_MASK 0xf800009f
#define VFP_FPSCR_CTRL_MASK 0x07f79f00 #define VFP_FPSCR_CTRL_MASK 0x07f79f00
......
...@@ -54,7 +54,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) ...@@ -54,7 +54,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{ {
return is_a32_compat_task(); return is_compat_task();
} }
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
......
...@@ -46,13 +46,15 @@ ...@@ -46,13 +46,15 @@
*/ */
#define ELF_HWCAP (elf_hwcap) #define ELF_HWCAP (elf_hwcap)
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
extern unsigned int a32_elf_hwcap, a32_elf_hwcap2; #define COMPAT_ELF_HWCAP (compat_elf_hwcap)
#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
#endif #endif
enum { enum {
CAP_HWCAP = 1, CAP_HWCAP = 1,
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
CAP_COMPAT_HWCAP, CAP_COMPAT_HWCAP,
CAP_COMPAT_HWCAP2, CAP_COMPAT_HWCAP2,
#endif #endif
......
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Predicates distinguishing the two 32-bit compat ABIs on arm64:
 * classic AArch32 (TIF_32BIT) and AArch64 ILP32 (TIF_32BIT_AARCH64).
 */
#ifndef __ASM_IS_COMPAT_H
#define __ASM_IS_COMPAT_H
#ifndef __ASSEMBLY__
#include <linux/thread_bits.h>
#ifdef CONFIG_AARCH32_EL0
/* True if the current task runs the AArch32 (classic 32-bit ARM) ABI. */
static inline int is_a32_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
}
/* True if @thread belongs to an AArch32 task. */
static inline int is_a32_compat_thread(struct thread_info *thread)
{
return test_ti_thread_flag(thread, TIF_32BIT);
}
#else
/* AArch32 EL0 support compiled out: never an AArch32 task. */
static inline int is_a32_compat_task(void)
{
return 0;
}
static inline int is_a32_compat_thread(struct thread_info *thread)
{
return 0;
}
#endif /* CONFIG_AARCH32_EL0 */
#ifdef CONFIG_ARM64_ILP32
/* True if the current task runs the AArch64 ILP32 ABI. */
static inline int is_ilp32_compat_task(void)
{
return test_thread_flag(TIF_32BIT_AARCH64);
}
/* True if @thread belongs to an ILP32 task. */
static inline int is_ilp32_compat_thread(struct thread_info *thread)
{
return test_ti_thread_flag(thread, TIF_32BIT_AARCH64);
}
#else
/* ILP32 support compiled out: never an ILP32 task. */
static inline int is_ilp32_compat_task(void)
{
return 0;
}
static inline int is_ilp32_compat_thread(struct thread_info *thread)
{
return 0;
}
#endif /* CONFIG_ARM64_ILP32 */
#ifdef CONFIG_COMPAT
/* A task is "compat" if it uses either 32-bit ABI (AArch32 or ILP32). */
static inline int is_compat_task(void)
{
return is_a32_compat_task() || is_ilp32_compat_task();
}
#endif /* CONFIG_COMPAT */
/* Thread-granular variant of is_compat_task(); available unconditionally. */
static inline int is_compat_thread(struct thread_info *thread)
{
return is_a32_compat_thread(thread) || is_ilp32_compat_thread(thread);
}
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_IS_COMPAT_H */
...@@ -192,12 +192,10 @@ extern u64 kimage_vaddr; ...@@ -192,12 +192,10 @@ extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */ /* the offset between the kernel virtual and physical mappings */
extern u64 kimage_voffset; extern u64 kimage_voffset;
#ifndef __ILP32__
static inline unsigned long kaslr_offset(void) static inline unsigned long kaslr_offset(void)
{ {
return kimage_vaddr - KIMAGE_VADDR; return kimage_vaddr - KIMAGE_VADDR;
} }
#endif
/* /*
* Allow all memory at the discovery stage. We will clip it later. * Allow all memory at the discovery stage. We will clip it later.
...@@ -257,7 +255,6 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); ...@@ -257,7 +255,6 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
#ifndef __ILP32__
/* /*
* Note: Drivers should NOT use these. They are the wrong * Note: Drivers should NOT use these. They are the wrong
* translation for translating DMA addresses. Use the driver * translation for translating DMA addresses. Use the driver
...@@ -274,7 +271,6 @@ static inline void *phys_to_virt(phys_addr_t x) ...@@ -274,7 +271,6 @@ static inline void *phys_to_virt(phys_addr_t x)
{ {
return (void *)(__phys_to_virt(x)); return (void *)(__phys_to_virt(x));
} }
#endif
/* /*
* Drivers should NOT use these either. * Drivers should NOT use these either.
......
...@@ -50,7 +50,6 @@ ...@@ -50,7 +50,6 @@
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/is_compat.h>
#include <asm/hw_breakpoint.h> #include <asm/hw_breakpoint.h>
#include <asm/lse.h> #include <asm/lse.h>
#include <asm/pgtable-hwdef.h> #include <asm/pgtable-hwdef.h>
...@@ -72,9 +71,9 @@ ...@@ -72,9 +71,9 @@
#else #else
#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE) #define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES */ #endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE (is_compat_task() ? \ #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64) TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (is_compat_thread(tsk) ? \ #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64) TASK_SIZE_32 : TASK_SIZE_64)
#else #else
#define TASK_SIZE TASK_SIZE_64 #define TASK_SIZE TASK_SIZE_64
...@@ -85,7 +84,7 @@ ...@@ -85,7 +84,7 @@
#define STACK_TOP_MAX TASK_SIZE_64 #define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000 #define AARCH32_VECTORS_BASE 0xffff0000
#define STACK_TOP (is_compat_task() ? \ #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
AARCH32_VECTORS_BASE : STACK_TOP_MAX) AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else #else
#define STACK_TOP STACK_TOP_MAX #define STACK_TOP STACK_TOP_MAX
...@@ -162,11 +161,11 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, ...@@ -162,11 +161,11 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
*size = sizeof_field(struct thread_struct, uw); *size = sizeof_field(struct thread_struct, uw);
} }
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
#define task_user_tls(t) \ #define task_user_tls(t) \
({ \ ({ \
unsigned long *__tls; \ unsigned long *__tls; \
if (is_a32_compat_thread(task_thread_info(t))) \ if (is_compat_thread(task_thread_info(t))) \
__tls = &(t)->thread.uw.tp2_value; \ __tls = &(t)->thread.uw.tp2_value; \
else \ else \
__tls = &(t)->thread.uw.tp_value; \ __tls = &(t)->thread.uw.tp_value; \
...@@ -215,7 +214,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc, ...@@ -215,7 +214,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->sp = sp; regs->sp = sp;
} }
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp) unsigned long sp)
{ {
......
...@@ -207,17 +207,17 @@ static inline void forget_syscall(struct pt_regs *regs) ...@@ -207,17 +207,17 @@ static inline void forget_syscall(struct pt_regs *regs)
#define arch_has_single_step() (1) #define arch_has_single_step() (1)
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
#define a32_thumb_mode(regs) \ #define compat_thumb_mode(regs) \
(((regs)->pstate & PSR_AA32_T_BIT)) (((regs)->pstate & PSR_AA32_T_BIT))
#else #else
#define a32_thumb_mode(regs) (0) #define compat_thumb_mode(regs) (0)
#endif #endif
#define user_mode(regs) \ #define user_mode(regs) \
(((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t) (((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)
#define a32_user_mode(regs) \ #define compat_user_mode(regs) \
(((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \ (((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \
(PSR_MODE32_BIT | PSR_MODE_EL0t)) (PSR_MODE32_BIT | PSR_MODE_EL0t))
...@@ -236,10 +236,10 @@ static inline void forget_syscall(struct pt_regs *regs) ...@@ -236,10 +236,10 @@ static inline void forget_syscall(struct pt_regs *regs)
(!((regs)->pstate & PSR_F_BIT)) (!((regs)->pstate & PSR_F_BIT))
#define GET_USP(regs) \ #define GET_USP(regs) \
(!a32_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
#define SET_USP(ptregs, value) \ #define SET_USP(ptregs, value) \
(!a32_user_mode(regs) ? ((regs)->sp = value) : ((regs)->compat_sp = value)) (!compat_user_mode(regs) ? ((regs)->sp = value) : ((regs)->compat_sp = value))
extern int regs_query_register_offset(const char *name); extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include <asm/unistd.h> #include <asm/unistd.h>
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
#define __NR_seccomp_read_32 __NR_compat_read #define __NR_seccomp_read_32 __NR_compat_read
#define __NR_seccomp_write_32 __NR_compat_write #define __NR_seccomp_write_32 __NR_compat_write
#define __NR_seccomp_exit_32 __NR_compat_exit #define __NR_seccomp_exit_32 __NR_compat_exit
......
...@@ -17,37 +17,34 @@ ...@@ -17,37 +17,34 @@
#define __ASM_SIGNAL32_H #define __ASM_SIGNAL32_H
#ifdef __KERNEL__ #ifdef __KERNEL__
#ifdef CONFIG_COMPAT
#ifdef CONFIG_AARCH32_EL0
#include <linux/compat.h> #include <linux/compat.h>
#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500 #define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
int a32_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs); struct pt_regs *regs);
int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
int a32_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs); struct pt_regs *regs);
void a32_setup_restart_syscall(struct pt_regs *regs); void compat_setup_restart_syscall(struct pt_regs *regs);
#else #else
static inline int a32_setup_frame(int usid, struct ksignal *ksig, static inline int compat_setup_frame(int usid, struct ksignal *ksig,
sigset_t *set, struct pt_regs *regs) sigset_t *set, struct pt_regs *regs)
{ {
return -ENOSYS; return -ENOSYS;
} }
static inline int a32_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, static inline int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs) struct pt_regs *regs)
{ {
return -ENOSYS; return -ENOSYS;
} }
static inline void a32_setup_restart_syscall(struct pt_regs *regs) static inline void compat_setup_restart_syscall(struct pt_regs *regs)
{ {
} }
#endif /* CONFIG_AARCH32_EL0 */ #endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __ASM_SIGNAL32_H */ #endif /* __ASM_SIGNAL32_H */
/* SPDX-License-Identifier: GPL-2.0+ */
/* Sigset conversion helpers shared by the 32-bit compat signal paths. */
#ifndef __ASM_SIGNAL32_COMMON_H
#define __ASM_SIGNAL32_COMMON_H
#ifdef CONFIG_COMPAT
/* Write the native sigset @set out to the compat user-space sigset @uset. */
int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set);
/* Read the compat user-space sigset @uset into the native sigset @set. */
int get_sigset_t(sigset_t *set, const compat_sigset_t __user *uset);
#endif /* CONFIG_COMPAT*/
#endif /* __ASM_SIGNAL32_COMMON_H */
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2018 Cavium Networks.
 */
/*
 * Shared machinery for laying out, writing and restoring the AArch64
 * rt signal frame, used by both the native and the ILP32 signal paths.
 */
#ifndef __ASM_SIGNAL_COMMON_H
#define __ASM_SIGNAL_COMMON_H
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/traps.h>
/* Sizes of the optional trailing records, rounded to 16-byte alignment. */
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define SIGCONTEXT_RESERVED_SIZE sizeof(((struct sigcontext *)0)->__reserved)
/* Offset of the __reserved record area within struct rt_sigframe. */
#define RT_SIGFRAME_RESERVED_OFFSET \
offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved)
/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_64K
/*
 * Tracks where the signal frame and each optional record land in user
 * memory while the frame is being laid out and written.
 */
struct rt_sigframe_user_layout {
void __user *sigframe;
struct frame_record __user *next_frame;
unsigned long size; /* size of allocated sigframe data */
unsigned long limit; /* largest allowed size */
unsigned long fpsimd_offset;
unsigned long esr_offset;
unsigned long sve_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
/* User-space record pointers discovered while parsing a sigcontext. */
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
struct sve_context __user *sve;
};
/* Minimal fp/lr pair pushed above the sigframe for stack unwinders. */
struct frame_record {
u64 fp;
u64 lr;
};
/* Translate a layout-relative @offset into a user-space pointer. */
void __user *apply_user_offset(struct rt_sigframe_user_layout const *user,
unsigned long offset);
int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all);
int setup_extra_context(char __user *sfp, unsigned long sf_size,
char __user *exprap);
int __parse_user_sigcontext(struct user_ctxs *user,
struct sigcontext __user const *sc,
void __user const *sigframe_base);
#define parse_user_sigcontext(user, sf) \
__parse_user_sigcontext(user, &(sf)->uc.uc_mcontext, sf)
int preserve_fpsimd_context(struct fpsimd_context __user *ctx);
int restore_fpsimd_context(struct fpsimd_context __user *ctx);
#ifdef CONFIG_ARM64_SVE
int preserve_sve_context(struct sve_context __user *ctx);
int restore_sve_fpsimd_context(struct user_ctxs *user);
#else /* ! CONFIG_ARM64_SVE */
/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_sve_context(void __user *ctx);
extern int restore_sve_fpsimd_context(struct user_ctxs *user);
#endif /* ! CONFIG_ARM64_SVE */
int sigframe_alloc(struct rt_sigframe_user_layout *user,
unsigned long *offset, size_t size);
int sigframe_alloc_end(struct rt_sigframe_user_layout *user);
void __setup_return(struct pt_regs *regs, struct k_sigaction *ka,
struct rt_sigframe_user_layout *user, int usig);
/*
 * Initialise @user to the baseline layout: size covers the fixed part of
 * struct rt_sigframe up to __reserved, and the limit keeps headroom for
 * the terminator and one extra_context record within __reserved.
 */
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
memset(user, 0, sizeof(*user));
user->size = RT_SIGFRAME_RESERVED_OFFSET;
user->limit = user->size + SIGCONTEXT_RESERVED_SIZE;
user->limit -= TERMINATOR_SIZE;
user->limit -= EXTRA_CONTEXT_SIZE;
/* Reserve space for extension and terminator ^ */
}
/* Total bytes needed for the frame described by @user, 16-byte aligned. */
static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}
/*
 * Pick the user stack addresses for the signal frame and the unwind frame
 * record (honouring any alternate signal stack via sigsp()), and verify
 * the whole span is writable. Returns 0 on success or a negative errno.
 */
static int get_sigframe(struct rt_sigframe_user_layout *user,
struct ksignal *ksig, struct pt_regs *regs)
{
unsigned long sp, sp_top;
int err;
init_user_layout(user);
err = setup_sigframe_layout(user, false);
if (err)
return err;
sp = sp_top = sigsp(regs->sp, ksig);
/* Frame record goes just below the chosen stack top, 16-byte aligned. */
sp = round_down(sp - sizeof(struct frame_record), 16);
user->next_frame = (struct frame_record __user *)sp;
/* Signal frame sits below the frame record. */
sp = round_down(sp, 16) - sigframe_size(user);
user->sigframe = (void __user *)sp;
/*
 * Check that we can actually write to the signal frame.
 */
if (!access_ok(user->sigframe, sp_top - sp))
return -EFAULT;
return 0;
}
/*
 * Restore the signal mask and general-purpose register state from the
 * user signal frame @sf, then reload FPSIMD (or SVE) state parsed out
 * of the sigcontext records. Returns 0 on success, -EINVAL for a
 * malformed frame, or a nonzero fault indication.
 */
static int restore_sigframe(struct pt_regs *regs,
struct rt_sigframe __user *sf)
{
sigset_t set;
int i, err;
struct user_ctxs user;
err = get_sigset(&set, &sf->uc.uc_sigmask);
if (err == 0)
set_current_blocked(&set);
for (i = 0; i < 31; i++)
__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
err);
__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
/*
 * Avoid sys_rt_sigreturn() restarting.
 */
forget_syscall(regs);
/* Reject register/pstate values user space may not set. */
err |= !valid_user_regs(&regs->user_regs, current);
if (err == 0)
err = parse_user_sigcontext(&user, sf);
if (err == 0) {
/* A valid frame always carries an fpsimd record. */
if (!user.fpsimd)
return -EINVAL;
if (user.sve) {
if (!system_supports_sve())
return -EINVAL;
err = restore_sve_fpsimd_context(&user);
} else {
err = restore_fpsimd_context(user.fpsimd);
}
}
return err;
}
/*
 * Populate the user signal frame described by @user from @regs and @set:
 * the unwind frame record, general registers, signal mask, FPSIMD state,
 * optional ESR/SVE/extra records, and the terminating "end" record.
 * Returns 0 on success, nonzero if any user-space write faulted.
 */
static int setup_sigframe(struct rt_sigframe_user_layout *user,
struct pt_regs *regs, sigset_t *set)
{
int i, err = 0;
struct rt_sigframe __user *sf = user->sigframe;
/* set up the stack frame for unwinding */
__put_user_error(regs->regs[29], &user->next_frame->fp, err);
__put_user_error(regs->regs[30], &user->next_frame->lr, err);
for (i = 0; i < 31; i++)
__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
err);
__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
__put_user_error(current->thread.fault_address,
&sf->uc.uc_mcontext.fault_address, err);
err |= put_sigset(set, &sf->uc.uc_sigmask);
if (err == 0) {
struct fpsimd_context __user *fpsimd_ctx =
apply_user_offset(user, user->fpsimd_offset);
err |= preserve_fpsimd_context(fpsimd_ctx);
}
/* fault information, if valid */
if (err == 0 && user->esr_offset) {
struct esr_context __user *esr_ctx =
apply_user_offset(user, user->esr_offset);
__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
__put_user_error(current->thread.fault_code,
&esr_ctx->esr, err);
}
/* Scalable Vector Extension state, if present */
if (system_supports_sve() && err == 0 && user->sve_offset) {
struct sve_context __user *sve_ctx =
apply_user_offset(user, user->sve_offset);
err |= preserve_sve_context(sve_ctx);
}
/* NOTE(review): setup_extra_context()'s return value is discarded here,
 * unlike the other record writers — confirm this is intentional. */
if (err == 0 && user->extra_offset)
setup_extra_context((char __user *)user->sigframe, user->size,
(char __user *)apply_user_offset(user,
user->extra_offset));
/* set the "end" magic */
if (err == 0) {
struct _aarch64_ctx __user *end =
apply_user_offset(user, user->end_offset);
__put_user_error(0, &end->magic, err);
__put_user_error(0, &end->size, err);
}
return err;
}
/*
 * Common rt_sigreturn implementation: validate the signal frame at the
 * current sp, restore register/signal state and the alternate signal
 * stack. On any failure the task is sent a segfault notification and
 * 0 is returned; on success the restored x0 value is returned.
 */
static long __sys_rt_sigreturn(struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
/*
 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
 * be word aligned here.
 */
if (regs->sp & 15)
goto badframe;
frame = (struct rt_sigframe __user *)regs->sp;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (restore_sigframe(regs, frame))
goto badframe;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return regs->regs[0];
badframe:
arm64_notify_segfault(regs->sp);
return 0;
}
/*
 * Build an rt signal frame on the user stack for @ksig and redirect
 * execution to the handler. Returns 0 on success, nonzero on failure
 * (the caller is expected to force a fatal signal in that case).
 */
static int __setup_rt_frame(int usig, struct ksignal *ksig,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe_user_layout user;
struct rt_sigframe __user *frame;
int err = 0;
/* presumably snapshots live FPSIMD state before it is copied out —
 * see preserve_fpsimd_context() in setup_sigframe() */
fpsimd_signal_preserve_current_state();
if (get_sigframe(&user, ksig, regs))
return 1;
frame = user.sigframe;
__put_user_error(0, &frame->uc.uc_flags, err);
__put_user_error((typeof(frame->uc.uc_link)) 0,
&frame->uc.uc_link, err);
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigframe(&user, regs, set);
if (err == 0) {
/* NOTE(review): this header declares __setup_return(), not
 * setup_return() — confirm the callee exists elsewhere. */
setup_return(regs, &ksig->ka, &user, usig);
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
regs->regs[1] = (unsigned long)&frame->info;
regs->regs[2] = (unsigned long)&frame->uc;
}
}
return err;
}
#endif /* __ASM_SIGNAL_COMMON_H */
/* SPDX-License-Identifier: GPL-2.0+ */
/* ILP32 rt signal-frame setup entry point, with a stub when disabled. */
#ifndef __ASM_SIGNAL_ILP32_H
#define __ASM_SIGNAL_ILP32_H
#ifdef CONFIG_ARM64_ILP32
#include <linux/compat.h>
/* Build an ILP32 rt signal frame; implemented by the ILP32 signal code. */
int ilp32_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
#else
/* ILP32 support compiled out: attempting to set up a frame fails. */
static inline int ilp32_setup_rt_frame(int usig, struct ksignal *ksig,
sigset_t *set, struct pt_regs *regs)
{
return -ENOSYS;
}
#endif /* CONFIG_ARM64_ILP32 */
#endif /* __ASM_SIGNAL_ILP32_H */
...@@ -24,12 +24,8 @@ typedef long (*syscall_fn_t)(const struct pt_regs *regs); ...@@ -24,12 +24,8 @@ typedef long (*syscall_fn_t)(const struct pt_regs *regs);
extern const syscall_fn_t sys_call_table[]; extern const syscall_fn_t sys_call_table[];
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
extern const syscall_fn_t a32_sys_call_table[]; extern const syscall_fn_t compat_sys_call_table[];
#endif
#ifdef CONFIG_ARM64_ILP32
extern const syscall_fn_t ilp32_sys_call_table[];
#endif #endif
static inline int syscall_get_nr(struct task_struct *task, static inline int syscall_get_nr(struct task_struct *task,
...@@ -133,7 +129,7 @@ static inline void syscall_set_arguments(struct task_struct *task, ...@@ -133,7 +129,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
*/ */
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)
{ {
if (is_a32_compat_task()) if (is_compat_task())
return AUDIT_ARCH_ARM; return AUDIT_ARCH_ARM;
return AUDIT_ARCH_AARCH64; return AUDIT_ARCH_AARCH64;
......
...@@ -88,11 +88,10 @@ void arch_release_task_struct(struct task_struct *tsk); ...@@ -88,11 +88,10 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_FREEZE 19 #define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20 #define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21 #define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* AARCH32 process */ #define TIF_32BIT 22 /* 32bit process */
#define TIF_SVE 23 /* Scalable Vector Extension in use */ #define TIF_SVE 23 /* Scalable Vector Extension in use */
#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */ #define TIF_SSBD 25 /* Wants SSB mitigation */
#define TIF_32BIT_AARCH64 26 /* 32 bit process on AArch64(ILP32) */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
...@@ -109,7 +108,6 @@ void arch_release_task_struct(struct task_struct *tsk); ...@@ -109,7 +108,6 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE) #define _TIF_SVE (1 << TIF_SVE)
#define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
......
...@@ -13,16 +13,12 @@ ...@@ -13,16 +13,12 @@
* You should have received a copy of the GNU General Public License * You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#define __ARCH_WANT_COMPAT_STAT64 #define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_LLSEEK
#endif
#ifdef CONFIG_AARCH32_EL0
#define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_SIGPROCMASK
......
...@@ -29,12 +29,6 @@ ...@@ -29,12 +29,6 @@
#include <generated/vdso-offsets.h> #include <generated/vdso-offsets.h>
#ifdef CONFIG_ARM64_ILP32
#include <generated/vdso-ilp32-offsets.h>
#else
#define vdso_offset_sigtramp_ilp32 ({ BUILD_BUG(); 0; })
#endif
#define VDSO_SYMBOL(base, name) \ #define VDSO_SYMBOL(base, name) \
({ \ ({ \
(void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \ (void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \
......
...@@ -17,14 +17,7 @@ ...@@ -17,14 +17,7 @@
#ifndef __ASM_BITSPERLONG_H #ifndef __ASM_BITSPERLONG_H
#define __ASM_BITSPERLONG_H #define __ASM_BITSPERLONG_H
#if defined(__LP64__) #define __BITS_PER_LONG 64
/* Assuming __LP64__ will be defined for native ELF64's and not for ILP32. */
# define __BITS_PER_LONG 64
#elif defined(__ILP32__)
# define __BITS_PER_LONG 32
#else
# error "Neither LP64 nor ILP32: unsupported ABI in asm/bitsperlong.h"
#endif
#include <asm-generic/bitsperlong.h> #include <asm-generic/bitsperlong.h>
......
...@@ -15,19 +15,6 @@ ...@@ -15,19 +15,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
/*
* Use AARCH32 interface for sys_sync_file_range() as it passes 64-bit arguments.
*/
#if defined(__ILP32__) || defined(__SYSCALL_COMPAT)
#define __ARCH_WANT_SYNC_FILE_RANGE2
#endif
/*
* AARCH64/ILP32 is introduced after next syscalls were deprecated.
*/
#if !(defined(__ILP32__) || defined(__SYSCALL_COMPAT))
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
#endif
#include <asm-generic/unistd.h> #include <asm-generic/unistd.h>
...@@ -27,11 +27,8 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_ ...@@ -27,11 +27,8 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_
$(obj)/%.stub.o: $(obj)/%.o FORCE $(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy) $(call if_changed,objcopy)
arm64-obj-$(CONFIG_AARCH32_EL0) += sys32.o kuser32.o signal32.o \ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o binfmt_elf32.o sys_compat.o
arm64-obj-$(CONFIG_ARM64_ILP32) += binfmt_ilp32.o sys_ilp32.o \
signal_ilp32.o
arm64-obj-$(CONFIG_COMPAT) += sys32_common.o signal32_common.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
...@@ -68,7 +65,6 @@ arm64-obj-$(CONFIG_MPAM) += mpam.o mpam_ctrlmon.o mpam_mon.o ...@@ -68,7 +65,6 @@ arm64-obj-$(CONFIG_MPAM) += mpam.o mpam_ctrlmon.o mpam_mon.o
arm64-obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o arm64-obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-y += $(arm64-obj-y) vdso/ probes/ obj-y += $(arm64-obj-y) vdso/ probes/
obj-$(CONFIG_ARM64_ILP32) += vdso-ilp32/
obj-m += $(arm64-obj-m) obj-m += $(arm64-obj-m)
head-y := head.o head-y := head.o
extra-y += $(head-y) vmlinux.lds extra-y += $(head-y) vmlinux.lds
......
...@@ -559,7 +559,7 @@ static int setend_set_hw_mode(bool enable) ...@@ -559,7 +559,7 @@ static int setend_set_hw_mode(bool enable)
return 0; return 0;
} }
static int __a32_setend_handler(struct pt_regs *regs, u32 big_endian) static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
{ {
char *insn; char *insn;
...@@ -582,14 +582,14 @@ static int __a32_setend_handler(struct pt_regs *regs, u32 big_endian) ...@@ -582,14 +582,14 @@ static int __a32_setend_handler(struct pt_regs *regs, u32 big_endian)
static int a32_setend_handler(struct pt_regs *regs, u32 instr) static int a32_setend_handler(struct pt_regs *regs, u32 instr)
{ {
int rc = __a32_setend_handler(regs, (instr >> 9) & 1); int rc = compat_setend_handler(regs, (instr >> 9) & 1);
arm64_skip_faulting_instruction(regs, 4); arm64_skip_faulting_instruction(regs, 4);
return rc; return rc;
} }
static int t16_setend_handler(struct pt_regs *regs, u32 instr) static int t16_setend_handler(struct pt_regs *regs, u32 instr)
{ {
int rc = __a32_setend_handler(regs, (instr >> 3) & 1); int rc = compat_setend_handler(regs, (instr >> 3) & 1);
arm64_skip_faulting_instruction(regs, 2); arm64_skip_faulting_instruction(regs, 2);
return rc; return rc;
} }
......
...@@ -70,7 +70,7 @@ int main(void) ...@@ -70,7 +70,7 @@ int main(void)
DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp)); DEFINE(S_SP, offsetof(struct pt_regs, sp));
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp)); DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp));
#endif #endif
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
...@@ -129,13 +129,6 @@ int main(void) ...@@ -129,13 +129,6 @@ int main(void)
DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec)); DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec)); DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec));
BLANK(); BLANK();
#ifdef CONFIG_COMPAT
DEFINE(COMPAT_TVAL_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(COMPAT_TVAL_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(COMPAT_TSPEC_TV_SEC, offsetof(struct compat_timespec, tv_sec));
DEFINE(COMPAT_TSPEC_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
BLANK();
#endif
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
BLANK(); BLANK();
......
// SPDX-License-Identifier: GPL-2.0+
/*
 * Support for AArch32 Linux ELF binaries.
 *
 * This file only provides the compat_* macro overrides consumed by the
 * shared fs/compat_binfmt_elf.c implementation included at the bottom.
 */

/* AArch32 EABI. */
#define EF_ARM_EABI_MASK	0xff000000

#define compat_start_thread	compat_start_thread

/*
 * Unlike the native SET_PERSONALITY macro, the compat version inherits
 * READ_IMPLIES_EXEC across a fork() since this is the behaviour on
 * arch/arm/.
 *
 * An AArch32 task has TIF_32BIT set and TIF_32BIT_AARCH64 (ILP32) clear.
 */
#define COMPAT_SET_PERSONALITY(ex)		\
({						\
	clear_thread_flag(TIF_32BIT_AARCH64);	\
	set_thread_flag(TIF_32BIT);		\
})

#define COMPAT_ARCH_DLINFO
#define COMPAT_ELF_HWCAP	(a32_elf_hwcap)
#define COMPAT_ELF_HWCAP2	(a32_elf_hwcap2)

/* AArch32 compat tasks get the vectors/kuser page rather than a vDSO. */
#define compat_arch_setup_additional_pages \
				aarch32_setup_vectors_page

/* AArch32 EABI. */
#define compat_elf_check_arch(x)	(system_supports_32bit_el0() && \
					 ((x)->e_machine == EM_ARM) && \
					 ((x)->e_flags & EF_ARM_EABI_MASK))

#include "../../../fs/compat_binfmt_elf.c"
// SPDX-License-Identifier: GPL-2.0+
/*
 * Support for ILP32 Linux/aarch64 ELF binaries.
 *
 * Overrides a number of binfmt_elf macros so that the shared
 * fs/binfmt_elf.c implementation (included at the bottom) builds a loader
 * for 32-bit (ELFCLASS32) images with the native AArch64 machine type.
 */

/* Never pull AArch32 compat definitions into this translation unit. */
#undef CONFIG_AARCH32_EL0

#define compat_elf_gregset_t	elf_gregset_t

#include <linux/elfcore-compat.h>
#include <linux/time.h>

/* ILP32 images are 32-bit ELF objects. */
#undef ELF_CLASS
#define ELF_CLASS	ELFCLASS32

#undef elfhdr
#undef elf_phdr
#undef elf_shdr
#undef elf_note
#undef elf_addr_t
#define elfhdr		elf32_hdr
#define elf_phdr	elf32_phdr
#define elf_shdr	elf32_shdr
#define elf_note	elf32_note
#define elf_addr_t	Elf32_Addr

/*
 * Some data types as stored in coredump.
 */
#define user_long_t		compat_long_t
#define user_siginfo_t		compat_siginfo_t
#define copy_siginfo_to_user	copy_siginfo_to_user32

/*
 * The machine-dependent core note format types are defined in elfcore-compat.h,
 * which requires asm/elf.h to define compat_elf_gregset_t et al.
 */
#define elf_prstatus	compat_elf_prstatus
#define elf_prpsinfo	compat_elf_prpsinfo

/* AARCH64 ILP32 EABI: ELFCLASS32 but a native AArch64 e_machine. */
#undef elf_check_arch
#define elf_check_arch(x)	(((x)->e_machine == EM_AARCH64)	\
				&& (x)->e_ident[EI_CLASS] == ELFCLASS32)

/* Mark the task as ILP32: TIF_32BIT_AARCH64 set, TIF_32BIT clear. */
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex)					\
do {								\
	set_bit(TIF_32BIT, &current->mm->context.flags);	\
	set_thread_flag(TIF_32BIT_AARCH64);			\
	clear_thread_flag(TIF_32BIT);				\
} while (0)

/* Publish the vDSO location to userspace via the auxiliary vector. */
#undef ARCH_DLINFO
#define ARCH_DLINFO						\
do {								\
	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
		(elf_addr_t)(long)current->mm->context.vdso);	\
} while (0)

#undef ELF_PLATFORM
#ifdef __AARCH64EB__
#define ELF_PLATFORM	("aarch64_be:ilp32")
#else
#define ELF_PLATFORM	("aarch64:ilp32")
#endif

/* ILP32 uses the compat (32-bit) ET_DYN load base. */
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE	COMPAT_ELF_ET_DYN_BASE

/* Report native AArch64 hwcaps split across the two 32-bit hwcap words. */
#undef ELF_HWCAP
#undef ELF_HWCAP2
#define ELF_HWCAP	((u32) elf_hwcap)
#define ELF_HWCAP2	((u32) (elf_hwcap >> 32))

/*
 * Rename a few of the symbols that binfmt_elf.c will define.
 * These are all local so the names don't really matter, but it
 * might make some debugging less confusing not to duplicate them.
 */
#define elf_format		compat_elf_format
#define init_elf_binfmt		init_compat_elf_binfmt
#define exit_elf_binfmt		exit_compat_elf_binfmt

#undef ns_to_timeval
#define ns_to_timeval		ns_to_compat_timeval

#include "../../../fs/binfmt_elf.c"
...@@ -38,14 +38,14 @@ ...@@ -38,14 +38,14 @@
unsigned long elf_hwcap __read_mostly; unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap); EXPORT_SYMBOL_GPL(elf_hwcap);
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
#define AARCH32_EL0_ELF_HWCAP_DEFAULT \ #define COMPAT_ELF_HWCAP_DEFAULT \
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_LPAE) COMPAT_HWCAP_LPAE)
unsigned int a32_elf_hwcap __read_mostly = AARCH32_EL0_ELF_HWCAP_DEFAULT; unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int a32_elf_hwcap2 __read_mostly; unsigned int compat_elf_hwcap2 __read_mostly;
#endif #endif
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
...@@ -1706,8 +1706,8 @@ static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) ...@@ -1706,8 +1706,8 @@ static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
} }
#endif #endif
static const struct arm64_cpu_capabilities a32_elf_hwcaps[] = { static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
...@@ -1728,12 +1728,12 @@ static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) ...@@ -1728,12 +1728,12 @@ static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
case CAP_HWCAP: case CAP_HWCAP:
elf_hwcap |= cap->hwcap; elf_hwcap |= cap->hwcap;
break; break;
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP: case CAP_COMPAT_HWCAP:
a32_elf_hwcap |= (u32)cap->hwcap; compat_elf_hwcap |= (u32)cap->hwcap;
break; break;
case CAP_COMPAT_HWCAP2: case CAP_COMPAT_HWCAP2:
a32_elf_hwcap2 |= (u32)cap->hwcap; compat_elf_hwcap2 |= (u32)cap->hwcap;
break; break;
#endif #endif
default: default:
...@@ -1751,12 +1751,12 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) ...@@ -1751,12 +1751,12 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
case CAP_HWCAP: case CAP_HWCAP:
rc = (elf_hwcap & cap->hwcap) != 0; rc = (elf_hwcap & cap->hwcap) != 0;
break; break;
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP: case CAP_COMPAT_HWCAP:
rc = (a32_elf_hwcap & (u32)cap->hwcap) != 0; rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
break; break;
case CAP_COMPAT_HWCAP2: case CAP_COMPAT_HWCAP2:
rc = (a32_elf_hwcap2 & (u32)cap->hwcap) != 0; rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
break; break;
#endif #endif
default: default:
...@@ -2005,7 +2005,7 @@ static void verify_local_cpu_capabilities(void) ...@@ -2005,7 +2005,7 @@ static void verify_local_cpu_capabilities(void)
verify_local_elf_hwcaps(arm64_elf_hwcaps); verify_local_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0()) if (system_supports_32bit_el0())
verify_local_elf_hwcaps(a32_elf_hwcaps); verify_local_elf_hwcaps(compat_elf_hwcaps);
if (system_supports_sve()) if (system_supports_sve())
verify_sve_features(); verify_sve_features();
...@@ -2076,7 +2076,7 @@ void __init setup_cpu_features(void) ...@@ -2076,7 +2076,7 @@ void __init setup_cpu_features(void)
setup_elf_hwcaps(arm64_elf_hwcaps); setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0()) if (system_supports_32bit_el0())
setup_elf_hwcaps(a32_elf_hwcaps); setup_elf_hwcaps(compat_elf_hwcaps);
if (system_uses_ttbr0_pan()) if (system_uses_ttbr0_pan())
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
......
...@@ -88,7 +88,7 @@ static const char *const hwcap_str[] = { ...@@ -88,7 +88,7 @@ static const char *const hwcap_str[] = {
NULL NULL
}; };
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
static const char *const compat_hwcap_str[] = { static const char *const compat_hwcap_str[] = {
"swp", "swp",
"half", "half",
...@@ -123,12 +123,12 @@ static const char *const compat_hwcap2_str[] = { ...@@ -123,12 +123,12 @@ static const char *const compat_hwcap2_str[] = {
"crc32", "crc32",
NULL NULL
}; };
#endif /* CONFIG_AARCH32_EL0 */ #endif /* CONFIG_COMPAT */
static int c_show(struct seq_file *m, void *v) static int c_show(struct seq_file *m, void *v)
{ {
int i, j; int i, j;
bool aarch32 = personality(current->personality) == PER_LINUX32; bool compat = personality(current->personality) == PER_LINUX32;
for_each_online_cpu(i) { for_each_online_cpu(i) {
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
...@@ -140,7 +140,7 @@ static int c_show(struct seq_file *m, void *v) ...@@ -140,7 +140,7 @@ static int c_show(struct seq_file *m, void *v)
* "processor". Give glibc what it expects. * "processor". Give glibc what it expects.
*/ */
seq_printf(m, "processor\t: %d\n", i); seq_printf(m, "processor\t: %d\n", i);
if (aarch32) if (compat)
seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
...@@ -155,16 +155,16 @@ static int c_show(struct seq_file *m, void *v) ...@@ -155,16 +155,16 @@ static int c_show(struct seq_file *m, void *v)
* software which does already (at least for 32-bit). * software which does already (at least for 32-bit).
*/ */
seq_puts(m, "Features\t:"); seq_puts(m, "Features\t:");
if (aarch32) { if (compat) {
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
for (j = 0; compat_hwcap_str[j]; j++) for (j = 0; compat_hwcap_str[j]; j++)
if (a32_elf_hwcap & (1 << j)) if (compat_elf_hwcap & (1 << j))
seq_printf(m, " %s", compat_hwcap_str[j]); seq_printf(m, " %s", compat_hwcap_str[j]);
for (j = 0; compat_hwcap2_str[j]; j++) for (j = 0; compat_hwcap2_str[j]; j++)
if (a32_elf_hwcap2 & (1 << j)) if (compat_elf_hwcap2 & (1 << j))
seq_printf(m, " %s", compat_hwcap2_str[j]); seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_AARCH32_EL0 */ #endif /* CONFIG_COMPAT */
} else { } else {
for (j = 0; hwcap_str[j]; j++) for (j = 0; hwcap_str[j]; j++)
if (elf_hwcap & (1 << j)) if (elf_hwcap & (1 << j))
......
...@@ -343,10 +343,10 @@ int aarch32_break_handler(struct pt_regs *regs) ...@@ -343,10 +343,10 @@ int aarch32_break_handler(struct pt_regs *regs)
bool bp = false; bool bp = false;
void __user *pc = (void __user *)instruction_pointer(regs); void __user *pc = (void __user *)instruction_pointer(regs);
if (!a32_user_mode(regs)) if (!compat_user_mode(regs))
return -EFAULT; return -EFAULT;
if (a32_thumb_mode(regs)) { if (compat_thumb_mode(regs)) {
/* get 16-bit Thumb instruction */ /* get 16-bit Thumb instruction */
__le16 instr; __le16 instr;
get_user(instr, (__le16 __user *)pc); get_user(instr, (__le16 __user *)pc);
......
...@@ -489,7 +489,7 @@ ENTRY(vectors) ...@@ -489,7 +489,7 @@ ENTRY(vectors)
kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
kernel_ventry 0, error // Error 64-bit EL0 kernel_ventry 0, error // Error 64-bit EL0
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
...@@ -558,7 +558,7 @@ el0_error_invalid: ...@@ -558,7 +558,7 @@ el0_error_invalid:
inv_entry 0, BAD_ERROR inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid) ENDPROC(el0_error_invalid)
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
el0_fiq_invalid_compat: el0_fiq_invalid_compat:
inv_entry 0, BAD_FIQ, 32 inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat) ENDPROC(el0_fiq_invalid_compat)
...@@ -759,7 +759,7 @@ el0_sync: ...@@ -759,7 +759,7 @@ el0_sync:
b.ge el0_dbg b.ge el0_dbg
b el0_inv b el0_inv
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
.align 6 .align 6
el0_sync_compat: el0_sync_compat:
kernel_entry 0, 32 kernel_entry 0, 32
......
...@@ -542,7 +542,7 @@ set_hcr: ...@@ -542,7 +542,7 @@ set_hcr:
msr vpidr_el2, x0 msr vpidr_el2, x0
msr vmpidr_el2, x1 msr vmpidr_el2, x1
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
msr hstr_el2, xzr // Disable CP15 traps to EL2 msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif #endif
......
...@@ -168,7 +168,7 @@ enum hw_breakpoint_ops { ...@@ -168,7 +168,7 @@ enum hw_breakpoint_ops {
HW_BREAKPOINT_RESTORE HW_BREAKPOINT_RESTORE
}; };
static int is_a32_compat_bp(struct perf_event *bp) static int is_compat_bp(struct perf_event *bp)
{ {
struct task_struct *tsk = bp->hw.target; struct task_struct *tsk = bp->hw.target;
...@@ -179,7 +179,7 @@ static int is_a32_compat_bp(struct perf_event *bp) ...@@ -179,7 +179,7 @@ static int is_a32_compat_bp(struct perf_event *bp)
* deprecated behaviour if we use unaligned watchpoints in * deprecated behaviour if we use unaligned watchpoints in
* AArch64 state. * AArch64 state.
*/ */
return tsk && is_a32_compat_thread(task_thread_info(tsk)); return tsk && is_compat_thread(task_thread_info(tsk));
} }
/** /**
...@@ -478,7 +478,7 @@ static int arch_build_bp_info(struct perf_event *bp, ...@@ -478,7 +478,7 @@ static int arch_build_bp_info(struct perf_event *bp,
* Watchpoints can be of length 1, 2, 4 or 8 bytes. * Watchpoints can be of length 1, 2, 4 or 8 bytes.
*/ */
if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) { if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
if (is_a32_compat_bp(bp)) { if (is_compat_bp(bp)) {
if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
hw->ctrl.len != ARM_BREAKPOINT_LEN_4) hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL; return -EINVAL;
...@@ -536,7 +536,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, ...@@ -536,7 +536,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
* AArch32 tasks expect some simple alignment fixups, so emulate * AArch32 tasks expect some simple alignment fixups, so emulate
* that here. * that here.
*/ */
if (is_a32_compat_bp(bp)) { if (is_compat_bp(bp)) {
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7; alignment_mask = 0x7;
else else
......
...@@ -63,26 +63,26 @@ user_backtrace(struct frame_tail __user *tail, ...@@ -63,26 +63,26 @@ user_backtrace(struct frame_tail __user *tail,
return buftail.fp; return buftail.fp;
} }
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
/* /*
* The registers we're interested in are at the end of the variable * The registers we're interested in are at the end of the variable
* length saved register structure. The fp points at the end of this * length saved register structure. The fp points at the end of this
* structure so the address of this struct is: * structure so the address of this struct is:
* (struct a32_frame_tail *)(xxx->fp)-1 * (struct compat_frame_tail *)(xxx->fp)-1
* *
* This code has been adapted from the ARM OProfile support. * This code has been adapted from the ARM OProfile support.
*/ */
struct a32_frame_tail { struct compat_frame_tail {
compat_uptr_t fp; /* a (struct a32_frame_tail *) in compat mode */ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
u32 sp; u32 sp;
u32 lr; u32 lr;
} __attribute__((packed)); } __attribute__((packed));
static struct a32_frame_tail __user * static struct compat_frame_tail __user *
compat_user_backtrace(struct a32_frame_tail __user *tail, compat_user_backtrace(struct compat_frame_tail __user *tail,
struct perf_callchain_entry_ctx *entry) struct perf_callchain_entry_ctx *entry)
{ {
struct a32_frame_tail buftail; struct compat_frame_tail buftail;
unsigned long err; unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */ /* Also check accessibility of one struct frame_tail beyond */
...@@ -102,13 +102,13 @@ compat_user_backtrace(struct a32_frame_tail __user *tail, ...@@ -102,13 +102,13 @@ compat_user_backtrace(struct a32_frame_tail __user *tail,
* Frame pointers should strictly progress back up the stack * Frame pointers should strictly progress back up the stack
* (towards higher addresses). * (towards higher addresses).
*/ */
if (tail + 1 >= (struct a32_frame_tail __user *) if (tail + 1 >= (struct compat_frame_tail __user *)
compat_ptr(buftail.fp)) compat_ptr(buftail.fp))
return NULL; return NULL;
return (struct a32_frame_tail __user *)compat_ptr(buftail.fp) - 1; return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
} }
#endif /* CONFIG_AARCH32_EL0 */ #endif /* CONFIG_COMPAT */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry, void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs) struct pt_regs *regs)
...@@ -120,7 +120,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, ...@@ -120,7 +120,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
perf_callchain_store(entry, regs->pc); perf_callchain_store(entry, regs->pc);
if (!a32_user_mode(regs)) { if (!compat_user_mode(regs)) {
/* AARCH64 mode */ /* AARCH64 mode */
struct frame_tail __user *tail; struct frame_tail __user *tail;
...@@ -130,11 +130,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, ...@@ -130,11 +130,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
tail && !((unsigned long)tail & 0xf)) tail && !((unsigned long)tail & 0xf))
tail = user_backtrace(tail, entry); tail = user_backtrace(tail, entry);
} else { } else {
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
/* AARCH32 compat mode */ /* AARCH32 compat mode */
struct a32_frame_tail __user *tail; struct compat_frame_tail __user *tail;
tail = (struct a32_frame_tail __user *)regs->compat_fp - 1; tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
while ((entry->nr < entry->max_stack) && while ((entry->nr < entry->max_stack) &&
tail && !((unsigned long)tail & 0x3)) tail && !((unsigned long)tail & 0x3))
......
...@@ -36,7 +36,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) ...@@ -36,7 +36,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
* At the time we make a sample, we don't know whether the consumer is * At the time we make a sample, we don't know whether the consumer is
* 32-bit or 64-bit, so we have to cater for both possibilities. * 32-bit or 64-bit, so we have to cater for both possibilities.
*/ */
if (a32_user_mode(regs)) { if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP) if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp; return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR) if ((u32)idx == PERF_REG_ARM64_LR)
...@@ -66,7 +66,7 @@ int perf_reg_validate(u64 mask) ...@@ -66,7 +66,7 @@ int perf_reg_validate(u64 mask)
u64 perf_reg_abi(struct task_struct *task) u64 perf_reg_abi(struct task_struct *task)
{ {
if (is_a32_compat_thread(task_thread_info(task))) if (is_compat_thread(task_thread_info(task)))
return PERF_SAMPLE_REGS_ABI_32; return PERF_SAMPLE_REGS_ABI_32;
else else
return PERF_SAMPLE_REGS_ABI_64; return PERF_SAMPLE_REGS_ABI_64;
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/arch_gicv3.h> #include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/exec.h> #include <asm/exec.h>
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
...@@ -224,7 +225,7 @@ static void print_pstate(struct pt_regs *regs) ...@@ -224,7 +225,7 @@ static void print_pstate(struct pt_regs *regs)
{ {
u64 pstate = regs->pstate; u64 pstate = regs->pstate;
if (a32_user_mode(regs)) { if (compat_user_mode(regs)) {
printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n", printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
pstate, pstate,
pstate & PSR_AA32_N_BIT ? 'N' : 'n', pstate & PSR_AA32_N_BIT ? 'N' : 'n',
...@@ -258,7 +259,7 @@ void __show_regs(struct pt_regs *regs) ...@@ -258,7 +259,7 @@ void __show_regs(struct pt_regs *regs)
int i, top_reg; int i, top_reg;
u64 lr, sp; u64 lr, sp;
if (a32_user_mode(regs)) { if (compat_user_mode(regs)) {
lr = regs->compat_lr; lr = regs->compat_lr;
sp = regs->compat_sp; sp = regs->compat_sp;
top_reg = 12; top_reg = 12;
...@@ -309,7 +310,7 @@ static void tls_thread_flush(void) ...@@ -309,7 +310,7 @@ static void tls_thread_flush(void)
{ {
write_sysreg(0, tpidr_el0); write_sysreg(0, tpidr_el0);
if (is_a32_compat_task()) { if (is_compat_task()) {
current->thread.uw.tp_value = 0; current->thread.uw.tp_value = 0;
/* /*
...@@ -391,7 +392,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -391,7 +392,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
*task_user_tls(p) = read_sysreg(tpidr_el0); *task_user_tls(p) = read_sysreg(tpidr_el0);
if (stack_start) { if (stack_start) {
if (is_a32_compat_thread(task_thread_info(p))) if (is_compat_thread(task_thread_info(p)))
childregs->compat_sp = stack_start; childregs->compat_sp = stack_start;
else else
childregs->sp = stack_start; childregs->sp = stack_start;
...@@ -435,7 +436,7 @@ static void tls_thread_switch(struct task_struct *next) ...@@ -435,7 +436,7 @@ static void tls_thread_switch(struct task_struct *next)
{ {
tls_preserve_current_state(); tls_preserve_current_state();
if (is_a32_compat_thread(task_thread_info(next))) if (is_compat_thread(task_thread_info(next)))
write_sysreg(next->thread.uw.tp_value, tpidrro_el0); write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
else if (!arm64_kernel_unmapped_at_el0()) else if (!arm64_kernel_unmapped_at_el0())
write_sysreg(0, tpidrro_el0); write_sysreg(0, tpidrro_el0);
...@@ -481,7 +482,7 @@ static void ssbs_thread_switch(struct task_struct *next) ...@@ -481,7 +482,7 @@ static void ssbs_thread_switch(struct task_struct *next)
test_tsk_thread_flag(next, TIF_SSBD)) test_tsk_thread_flag(next, TIF_SSBD))
return; return;
if (a32_user_mode(regs)) if (compat_user_mode(regs))
set_compat_ssbs_bit(regs); set_compat_ssbs_bit(regs);
else if (user_mode(regs)) else if (user_mode(regs))
set_ssbs_bit(regs); set_ssbs_bit(regs);
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <linux/tracehook.h> #include <linux/tracehook.h>
#include <linux/elf.h> #include <linux/elf.h>
#include <asm/compat.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
...@@ -190,8 +191,8 @@ static void ptrace_hbptriggered(struct perf_event *bp, ...@@ -190,8 +191,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
info.si_code = TRAP_HWBKPT; info.si_code = TRAP_HWBKPT;
info.si_addr = (void __user *)(bkpt->trigger); info.si_addr = (void __user *)(bkpt->trigger);
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
if (is_a32_compat_task()) { if (is_compat_task()) {
int si_errno = 0; int si_errno = 0;
int i; int i;
...@@ -1245,10 +1246,6 @@ static const struct user_regset_view user_aarch64_view = { ...@@ -1245,10 +1246,6 @@ static const struct user_regset_view user_aarch64_view = {
}; };
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#ifdef CONFIG_AARCH32_EL0
enum compat_regset { enum compat_regset {
REGSET_COMPAT_GPR, REGSET_COMPAT_GPR,
REGSET_COMPAT_VFP, REGSET_COMPAT_VFP,
...@@ -1714,7 +1711,7 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, ...@@ -1714,7 +1711,7 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
} }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_HAVE_HW_BREAKPOINT */
static long compat_a32_ptrace(struct task_struct *child, compat_long_t request, long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata) compat_ulong_t caddr, compat_ulong_t cdata)
{ {
unsigned long addr = caddr; unsigned long addr = caddr;
...@@ -1791,35 +1788,20 @@ static long compat_a32_ptrace(struct task_struct *child, compat_long_t request, ...@@ -1791,35 +1788,20 @@ static long compat_a32_ptrace(struct task_struct *child, compat_long_t request,
return ret; return ret;
} }
#endif /* CONFIG_COMPAT */
#else
#define compat_a32_ptrace(child, request, caddr, cdata) (0)
#endif /* CONFIG_AARCH32_EL0 */
#ifdef CONFIG_COMPAT
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
if (is_a32_compat_task())
return compat_a32_ptrace(child, request, caddr, cdata);
/* ILP32 */
return compat_ptrace_request(child, request, caddr, cdata);
}
#endif
const struct user_regset_view *task_user_regset_view(struct task_struct *task) const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{ {
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
/* /*
* Core dumping of 32-bit tasks or compat ptrace requests must use the * Core dumping of 32-bit tasks or compat ptrace requests must use the
* user_aarch32_view compatible with arm32. Native ptrace requests on * user_aarch32_view compatible with arm32. Native ptrace requests on
* 32-bit children use an extended user_aarch32_ptrace_view to allow * 32-bit children use an extended user_aarch32_ptrace_view to allow
* access to the TLS register. * access to the TLS register.
*/ */
if (is_a32_compat_task()) if (is_compat_task())
return &user_aarch32_view; return &user_aarch32_view;
else if (is_a32_compat_thread(task_thread_info(task))) else if (is_compat_thread(task_thread_info(task)))
return &user_aarch32_ptrace_view; return &user_aarch32_ptrace_view;
#endif #endif
return &user_aarch64_view; return &user_aarch64_view;
...@@ -1846,7 +1828,7 @@ static void tracehook_report_syscall(struct pt_regs *regs, ...@@ -1846,7 +1828,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
* A scratch register (ip(r12) on AArch32, x7 on AArch64) is * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
* used to denote syscall entry/exit: * used to denote syscall entry/exit:
*/ */
regno = (is_a32_compat_task() ? 12 : 7); regno = (is_compat_task() ? 12 : 7);
saved_reg = regs->regs[regno]; saved_reg = regs->regs[regno];
regs->regs[regno] = dir; regs->regs[regno] = dir;
...@@ -1977,7 +1959,7 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) ...@@ -1977,7 +1959,7 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
user_regs_reset_single_step(regs, task); user_regs_reset_single_step(regs, task);
if (is_a32_compat_thread(task_thread_info(task))) if (is_compat_thread(task_thread_info(task)))
return valid_compat_regs(regs); return valid_compat_regs(regs);
else else
return valid_native_regs(regs); return valid_native_regs(regs);
......
...@@ -44,10 +44,6 @@ ...@@ -44,10 +44,6 @@
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/ras.h> #include <asm/ras.h>
#include <asm/signal_ilp32.h>
#define get_sigset(s, m) __copy_from_user(s, m, sizeof(*s))
#define put_sigset(s, m) __copy_to_user(m, s, sizeof(*s))
/* /*
* Do a signal return; undo the signal stack. These are aligned to 128-bit. * Do a signal return; undo the signal stack. These are aligned to 128-bit.
...@@ -56,12 +52,57 @@ struct rt_sigframe { ...@@ -56,12 +52,57 @@ struct rt_sigframe {
struct siginfo info; struct siginfo info;
struct ucontext uc; struct ucontext uc;
}; };
struct rt_sigframe_user_layout;
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, struct frame_record {
struct rt_sigframe_user_layout *user, int usig); u64 fp;
u64 lr;
};
struct rt_sigframe_user_layout {
struct rt_sigframe __user *sigframe;
struct frame_record __user *next_frame;
unsigned long size; /* size of allocated sigframe data */
unsigned long limit; /* largest allowed size */
unsigned long fpsimd_offset;
unsigned long esr_offset;
unsigned long sve_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
const size_t reserved_size =
sizeof(user->sigframe->uc.uc_mcontext.__reserved);
memset(user, 0, sizeof(*user));
user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
user->limit = user->size + reserved_size;
user->limit -= TERMINATOR_SIZE;
user->limit -= EXTRA_CONTEXT_SIZE;
/* Reserve space for extension and terminator ^ */
}
#include <asm/signal_common.h> static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}
/*
* Sanity limit on the approximate maximum size of signal frame we'll
* try to generate. Stack alignment padding and the frame record are
* not taken into account. This limit is not a guarantee and is
* NOT ABI.
*/
#define SIGFRAME_MAXSZ SZ_64K
static int __sigframe_alloc(struct rt_sigframe_user_layout *user, static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
unsigned long *offset, size_t size, bool extend) unsigned long *offset, size_t size, bool extend)
...@@ -106,14 +147,14 @@ static int __sigframe_alloc(struct rt_sigframe_user_layout *user, ...@@ -106,14 +147,14 @@ static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
* signal frame. The offset from the signal frame base address to the * signal frame. The offset from the signal frame base address to the
* allocated block is assigned to *offset. * allocated block is assigned to *offset.
*/ */
int sigframe_alloc(struct rt_sigframe_user_layout *user, static int sigframe_alloc(struct rt_sigframe_user_layout *user,
unsigned long *offset, size_t size) unsigned long *offset, size_t size)
{ {
return __sigframe_alloc(user, offset, size, true); return __sigframe_alloc(user, offset, size, true);
} }
/* Allocate the null terminator record and prevent further allocations */ /* Allocate the null terminator record and prevent further allocations */
int sigframe_alloc_end(struct rt_sigframe_user_layout *user) static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{ {
int ret; int ret;
...@@ -130,7 +171,7 @@ int sigframe_alloc_end(struct rt_sigframe_user_layout *user) ...@@ -130,7 +171,7 @@ int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
return 0; return 0;
} }
void __user *apply_user_offset( static void __user *apply_user_offset(
struct rt_sigframe_user_layout const *user, unsigned long offset) struct rt_sigframe_user_layout const *user, unsigned long offset)
{ {
char __user *base = (char __user *)user->sigframe; char __user *base = (char __user *)user->sigframe;
...@@ -138,7 +179,7 @@ void __user *apply_user_offset( ...@@ -138,7 +179,7 @@ void __user *apply_user_offset(
return base + offset; return base + offset;
} }
int preserve_fpsimd_context(struct fpsimd_context __user *ctx) static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{ {
struct user_fpsimd_state const *fpsimd = struct user_fpsimd_state const *fpsimd =
&current->thread.uw.fpsimd_state; &current->thread.uw.fpsimd_state;
...@@ -156,7 +197,7 @@ int preserve_fpsimd_context(struct fpsimd_context __user *ctx) ...@@ -156,7 +197,7 @@ int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
return err ? -EFAULT : 0; return err ? -EFAULT : 0;
} }
int restore_fpsimd_context(struct fpsimd_context __user *ctx) static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{ {
struct user_fpsimd_state fpsimd; struct user_fpsimd_state fpsimd;
__u32 magic, size; __u32 magic, size;
...@@ -185,9 +226,15 @@ int restore_fpsimd_context(struct fpsimd_context __user *ctx) ...@@ -185,9 +226,15 @@ int restore_fpsimd_context(struct fpsimd_context __user *ctx)
return err ? -EFAULT : 0; return err ? -EFAULT : 0;
} }
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
struct sve_context __user *sve;
};
#ifdef CONFIG_ARM64_SVE #ifdef CONFIG_ARM64_SVE
int preserve_sve_context(struct sve_context __user *ctx) static int preserve_sve_context(struct sve_context __user *ctx)
{ {
int err = 0; int err = 0;
u16 reserved[ARRAY_SIZE(ctx->__reserved)]; u16 reserved[ARRAY_SIZE(ctx->__reserved)];
...@@ -219,7 +266,7 @@ int preserve_sve_context(struct sve_context __user *ctx) ...@@ -219,7 +266,7 @@ int preserve_sve_context(struct sve_context __user *ctx)
return err ? -EFAULT : 0; return err ? -EFAULT : 0;
} }
int restore_sve_fpsimd_context(struct user_ctxs *user) static int restore_sve_fpsimd_context(struct user_ctxs *user)
{ {
int err; int err;
unsigned int vq; unsigned int vq;
...@@ -282,18 +329,25 @@ int restore_sve_fpsimd_context(struct user_ctxs *user) ...@@ -282,18 +329,25 @@ int restore_sve_fpsimd_context(struct user_ctxs *user)
return err ? -EFAULT : 0; return err ? -EFAULT : 0;
} }
#else /* ! CONFIG_ARM64_SVE */
/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_sve_context(void __user *ctx);
extern int restore_sve_fpsimd_context(struct user_ctxs *user);
#endif /* ! CONFIG_ARM64_SVE */ #endif /* ! CONFIG_ARM64_SVE */
int __parse_user_sigcontext(struct user_ctxs *user,
struct sigcontext __user const *sc, static int parse_user_sigframe(struct user_ctxs *user,
void __user const *sigframe_base) struct rt_sigframe __user *sf)
{ {
struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
struct _aarch64_ctx __user *head; struct _aarch64_ctx __user *head;
char __user *base = (char __user *)&sc->__reserved; char __user *base = (char __user *)&sc->__reserved;
size_t offset = 0; size_t offset = 0;
size_t limit = sizeof(sc->__reserved); size_t limit = sizeof(sc->__reserved);
bool have_extra_context = false; bool have_extra_context = false;
char const __user *const sfp = (char const __user *)sigframe_base; char const __user *const sfp = (char const __user *)sf;
user->fpsimd = NULL; user->fpsimd = NULL;
user->sve = NULL; user->sve = NULL;
...@@ -442,11 +496,81 @@ int __parse_user_sigcontext(struct user_ctxs *user, ...@@ -442,11 +496,81 @@ int __parse_user_sigcontext(struct user_ctxs *user,
return -EINVAL; return -EINVAL;
} }
static int restore_sigframe(struct pt_regs *regs,
struct rt_sigframe __user *sf)
{
sigset_t set;
int i, err;
struct user_ctxs user;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0)
set_current_blocked(&set);
for (i = 0; i < 31; i++)
__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
err);
__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
/*
* Avoid sys_rt_sigreturn() restarting.
*/
forget_syscall(regs);
err |= !valid_user_regs(&regs->user_regs, current);
if (err == 0)
err = parse_user_sigframe(&user, sf);
if (err == 0) {
if (!user.fpsimd)
return -EINVAL;
if (user.sve) {
if (!system_supports_sve())
return -EINVAL;
err = restore_sve_fpsimd_context(&user);
} else {
err = restore_fpsimd_context(user.fpsimd);
}
}
return err;
}
SYSCALL_DEFINE0(rt_sigreturn) SYSCALL_DEFINE0(rt_sigreturn)
{ {
struct pt_regs *regs = current_pt_regs(); struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 128-bit boundary, then 'sp' should
* be word aligned here.
*/
if (regs->sp & 15)
goto badframe;
frame = (struct rt_sigframe __user *)regs->sp;
if (!access_ok(frame, sizeof (*frame)))
goto badframe;
if (restore_sigframe(regs, frame))
goto badframe;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return regs->regs[0];
return __sys_rt_sigreturn(regs); badframe:
arm64_notify_segfault(regs->sp);
return 0;
} }
/* /*
...@@ -456,7 +580,8 @@ SYSCALL_DEFINE0(rt_sigreturn) ...@@ -456,7 +580,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
* this task; otherwise, generates a layout for the current state * this task; otherwise, generates a layout for the current state
* of the task. * of the task.
*/ */
int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all) static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
bool add_all)
{ {
int err; int err;
...@@ -494,28 +619,73 @@ int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all) ...@@ -494,28 +619,73 @@ int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all)
return sigframe_alloc_end(user); return sigframe_alloc_end(user);
} }
int setup_extra_context(char __user *sfp, unsigned long sf_size, static int setup_sigframe(struct rt_sigframe_user_layout *user,
char __user *extrap) struct pt_regs *regs, sigset_t *set)
{ {
int err = 0; int i, err = 0;
struct rt_sigframe __user *sf = user->sigframe;
/* set up the stack frame for unwinding */
__put_user_error(regs->regs[29], &user->next_frame->fp, err);
__put_user_error(regs->regs[30], &user->next_frame->lr, err);
for (i = 0; i < 31; i++)
__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
err);
__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
if (err == 0) {
struct fpsimd_context __user *fpsimd_ctx =
apply_user_offset(user, user->fpsimd_offset);
err |= preserve_fpsimd_context(fpsimd_ctx);
}
/* fault information, if valid */
if (err == 0 && user->esr_offset) {
struct esr_context __user *esr_ctx =
apply_user_offset(user, user->esr_offset);
__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
}
/* Scalable Vector Extension state, if present */
if (system_supports_sve() && err == 0 && user->sve_offset) {
struct sve_context __user *sve_ctx =
apply_user_offset(user, user->sve_offset);
err |= preserve_sve_context(sve_ctx);
}
if (err == 0 && user->extra_offset) {
char __user *sfp = (char __user *)user->sigframe;
char __user *userp =
apply_user_offset(user, user->extra_offset);
struct extra_context __user *extra; struct extra_context __user *extra;
struct _aarch64_ctx __user *end; struct _aarch64_ctx __user *end;
u64 extra_datap; u64 extra_datap;
u32 extra_size; u32 extra_size;
extra = (struct extra_context __user *)extrap; extra = (struct extra_context __user *)userp;
extrap += EXTRA_CONTEXT_SIZE; userp += EXTRA_CONTEXT_SIZE;
end = (struct _aarch64_ctx __user *)extrap; end = (struct _aarch64_ctx __user *)userp;
extrap += TERMINATOR_SIZE; userp += TERMINATOR_SIZE;
/* /*
* extra_datap is just written to the signal frame. * extra_datap is just written to the signal frame.
* The value gets cast back to a void __user * * The value gets cast back to a void __user *
* during sigreturn. * during sigreturn.
*/ */
extra_datap = (__force u64)extrap; extra_datap = (__force u64)userp;
extra_size = sfp + round_up(sf_size, 16) - extrap; extra_size = sfp + round_up(user->size, 16) - userp;
__put_user_error(EXTRA_MAGIC, &extra->head.magic, err); __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err); __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
...@@ -525,18 +695,46 @@ int setup_extra_context(char __user *sfp, unsigned long sf_size, ...@@ -525,18 +695,46 @@ int setup_extra_context(char __user *sfp, unsigned long sf_size,
/* Add the terminator */ /* Add the terminator */
__put_user_error(0, &end->magic, err); __put_user_error(0, &end->magic, err);
__put_user_error(0, &end->size, err); __put_user_error(0, &end->size, err);
}
/* set the "end" magic */
if (err == 0) {
struct _aarch64_ctx __user *end =
apply_user_offset(user, user->end_offset);
__put_user_error(0, &end->magic, err);
__put_user_error(0, &end->size, err);
}
return err; return err;
} }
void __setup_return(struct pt_regs *regs, struct k_sigaction *ka, static int get_sigframe(struct rt_sigframe_user_layout *user,
struct rt_sigframe_user_layout *user, int usig) struct ksignal *ksig, struct pt_regs *regs)
{ {
regs->regs[0] = usig; unsigned long sp, sp_top;
regs->sp = (unsigned long)user->sigframe; int err;
regs->regs[29] = (unsigned long)&user->next_frame->fp;
regs->pc = (unsigned long)ka->sa.sa_handler; init_user_layout(user);
err = setup_sigframe_layout(user, false);
if (err)
return err;
sp = sp_top = sigsp(regs->sp, ksig);
sp = round_down(sp - sizeof(struct frame_record), 16);
user->next_frame = (struct frame_record __user *)sp;
sp = round_down(sp, 16) - sigframe_size(user);
user->sigframe = (struct rt_sigframe __user *)sp;
/*
* Check that we can actually write to the signal frame.
*/
if (!access_ok(user->sigframe, sp_top - sp))
return -EFAULT;
return 0;
} }
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
...@@ -544,7 +742,10 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, ...@@ -544,7 +742,10 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
{ {
__sigrestore_t sigtramp; __sigrestore_t sigtramp;
__setup_return(regs, ka, user, usig); regs->regs[0] = usig;
regs->sp = (unsigned long)user->sigframe;
regs->regs[29] = (unsigned long)&user->next_frame->fp;
regs->pc = (unsigned long)ka->sa.sa_handler;
if (ka->sa.sa_flags & SA_RESTORER) if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer; sigtramp = ka->sa.sa_restorer;
...@@ -557,13 +758,38 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, ...@@ -557,13 +758,38 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs) struct pt_regs *regs)
{ {
return __setup_rt_frame(usig, ksig, set, regs); struct rt_sigframe_user_layout user;
struct rt_sigframe __user *frame;
int err = 0;
fpsimd_signal_preserve_current_state();
if (get_sigframe(&user, ksig, regs))
return 1;
frame = user.sigframe;
__put_user_error(0, &frame->uc.uc_flags, err);
__put_user_error(NULL, &frame->uc.uc_link, err);
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigframe(&user, regs, set);
if (err == 0) {
setup_return(regs, &ksig->ka, &user, usig);
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
regs->regs[1] = (unsigned long)&frame->info;
regs->regs[2] = (unsigned long)&frame->uc;
}
}
return err;
} }
static void setup_restart_syscall(struct pt_regs *regs) static void setup_restart_syscall(struct pt_regs *regs)
{ {
if (is_a32_compat_task()) if (is_compat_task())
a32_setup_restart_syscall(regs); compat_setup_restart_syscall(regs);
else else
regs->regs[8] = __NR_restart_syscall; regs->regs[8] = __NR_restart_syscall;
} }
...@@ -582,13 +808,11 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) ...@@ -582,13 +808,11 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/* /*
* Set up the stack frame * Set up the stack frame
*/ */
if (is_a32_compat_task()) { if (is_compat_task()) {
if (ksig->ka.sa.sa_flags & SA_SIGINFO) if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = a32_setup_rt_frame(usig, ksig, oldset, regs); ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
else else
ret = a32_setup_frame(usig, ksig, oldset, regs); ret = compat_setup_frame(usig, ksig, oldset, regs);
} else if (is_ilp32_compat_task()) {
ret = ilp32_setup_rt_frame(usig, ksig, oldset, regs);
} else { } else {
ret = setup_rt_frame(usig, ksig, oldset, regs); ret = setup_rt_frame(usig, ksig, oldset, regs);
} }
...@@ -623,7 +847,7 @@ static void do_signal(struct pt_regs *regs) ...@@ -623,7 +847,7 @@ static void do_signal(struct pt_regs *regs)
*/ */
if (syscall) { if (syscall) {
continue_addr = regs->pc; continue_addr = regs->pc;
restart_addr = continue_addr - (a32_thumb_mode(regs) ? 2 : 4); restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
retval = regs->regs[0]; retval = regs->regs[0];
/* /*
......
...@@ -27,11 +27,10 @@ ...@@ -27,11 +27,10 @@
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/signal32.h> #include <asm/signal32.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/signal32_common.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/unistd.h> #include <asm/unistd.h>
struct a32_sigcontext { struct compat_sigcontext {
/* We always set these two fields to 0 */ /* We always set these two fields to 0 */
compat_ulong_t trap_no; compat_ulong_t trap_no;
compat_ulong_t error_code; compat_ulong_t error_code;
...@@ -57,17 +56,17 @@ struct a32_sigcontext { ...@@ -57,17 +56,17 @@ struct a32_sigcontext {
compat_ulong_t fault_address; compat_ulong_t fault_address;
}; };
struct a32_ucontext { struct compat_ucontext {
compat_ulong_t uc_flags; compat_ulong_t uc_flags;
compat_uptr_t uc_link; compat_uptr_t uc_link;
compat_stack_t uc_stack; compat_stack_t uc_stack;
struct a32_sigcontext uc_mcontext; struct compat_sigcontext uc_mcontext;
compat_sigset_t uc_sigmask; compat_sigset_t uc_sigmask;
int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))]; int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))];
compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8))); compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8)));
}; };
struct a32_vfp_sigframe { struct compat_vfp_sigframe {
compat_ulong_t magic; compat_ulong_t magic;
compat_ulong_t size; compat_ulong_t size;
struct compat_user_vfp { struct compat_user_vfp {
...@@ -82,34 +81,56 @@ struct a32_vfp_sigframe { ...@@ -82,34 +81,56 @@ struct a32_vfp_sigframe {
} __attribute__((__aligned__(8))); } __attribute__((__aligned__(8)));
#define VFP_MAGIC 0x56465001 #define VFP_MAGIC 0x56465001
#define VFP_STORAGE_SIZE sizeof(struct a32_vfp_sigframe) #define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe)
#define FSR_WRITE_SHIFT (11) #define FSR_WRITE_SHIFT (11)
struct a32_aux_sigframe { struct compat_aux_sigframe {
struct a32_vfp_sigframe vfp; struct compat_vfp_sigframe vfp;
/* Something that isn't a valid magic number for any coprocessor. */ /* Something that isn't a valid magic number for any coprocessor. */
unsigned long end_magic; unsigned long end_magic;
} __attribute__((__aligned__(8))); } __attribute__((__aligned__(8)));
struct a32_sigframe { struct compat_sigframe {
struct a32_ucontext uc; struct compat_ucontext uc;
compat_ulong_t retcode[2]; compat_ulong_t retcode[2];
}; };
struct a32_rt_sigframe { struct compat_rt_sigframe {
struct compat_siginfo info; struct compat_siginfo info;
struct a32_sigframe sig; struct compat_sigframe sig;
}; };
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
cset.sig[0] = set->sig[0] & 0xffffffffull;
cset.sig[1] = set->sig[0] >> 32;
return copy_to_user(uset, &cset, sizeof(*uset));
}
static inline int get_sigset_t(sigset_t *set,
const compat_sigset_t __user *uset)
{
compat_sigset_t s32;
if (copy_from_user(&s32, uset, sizeof(*uset)))
return -EFAULT;
set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
return 0;
}
/* /*
* VFP save/restore code. * VFP save/restore code.
* *
* We have to be careful with endianness, since the fpsimd context-switch * We have to be careful with endianness, since the fpsimd context-switch
* code operates on 128-bit (Q) register values whereas the a32 ABI * code operates on 128-bit (Q) register values whereas the compat ABI
* uses an array of 64-bit (D) registers. Consequently, we need to swap * uses an array of 64-bit (D) registers. Consequently, we need to swap
* the two halves of each Q register when running on a big-endian CPU. * the two halves of each Q register when running on a big-endian CPU.
*/ */
...@@ -126,7 +147,7 @@ union __fpsimd_vreg { ...@@ -126,7 +147,7 @@ union __fpsimd_vreg {
}; };
}; };
static int a32_preserve_vfp_context(struct a32_vfp_sigframe __user *frame) static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{ {
struct user_fpsimd_state const *fpsimd = struct user_fpsimd_state const *fpsimd =
&current->thread.uw.fpsimd_state; &current->thread.uw.fpsimd_state;
...@@ -176,7 +197,7 @@ static int a32_preserve_vfp_context(struct a32_vfp_sigframe __user *frame) ...@@ -176,7 +197,7 @@ static int a32_preserve_vfp_context(struct a32_vfp_sigframe __user *frame)
return err ? -EFAULT : 0; return err ? -EFAULT : 0;
} }
static int a32_restore_vfp_context(struct a32_vfp_sigframe __user *frame) static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
{ {
struct user_fpsimd_state fpsimd; struct user_fpsimd_state fpsimd;
compat_ulong_t magic = VFP_MAGIC; compat_ulong_t magic = VFP_MAGIC;
...@@ -216,12 +237,12 @@ static int a32_restore_vfp_context(struct a32_vfp_sigframe __user *frame) ...@@ -216,12 +237,12 @@ static int a32_restore_vfp_context(struct a32_vfp_sigframe __user *frame)
return err ? -EFAULT : 0; return err ? -EFAULT : 0;
} }
static int a32_restore_sigframe(struct pt_regs *regs, static int compat_restore_sigframe(struct pt_regs *regs,
struct a32_sigframe __user *sf) struct compat_sigframe __user *sf)
{ {
int err; int err;
sigset_t set; sigset_t set;
struct a32_aux_sigframe __user *aux; struct compat_aux_sigframe __user *aux;
unsigned long psr; unsigned long psr;
err = get_sigset_t(&set, &sf->uc.uc_sigmask); err = get_sigset_t(&set, &sf->uc.uc_sigmask);
...@@ -257,9 +278,9 @@ static int a32_restore_sigframe(struct pt_regs *regs, ...@@ -257,9 +278,9 @@ static int a32_restore_sigframe(struct pt_regs *regs,
err |= !valid_user_regs(&regs->user_regs, current); err |= !valid_user_regs(&regs->user_regs, current);
aux = (struct a32_aux_sigframe __user *) sf->uc.uc_regspace; aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
if (err == 0) if (err == 0)
err |= a32_restore_vfp_context(&aux->vfp); err |= compat_restore_vfp_context(&aux->vfp);
return err; return err;
} }
...@@ -267,7 +288,7 @@ static int a32_restore_sigframe(struct pt_regs *regs, ...@@ -267,7 +288,7 @@ static int a32_restore_sigframe(struct pt_regs *regs,
COMPAT_SYSCALL_DEFINE0(sigreturn) COMPAT_SYSCALL_DEFINE0(sigreturn)
{ {
struct pt_regs *regs = current_pt_regs(); struct pt_regs *regs = current_pt_regs();
struct a32_sigframe __user *frame; struct compat_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */ /* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall; current->restart_block.fn = do_no_restart_syscall;
...@@ -280,12 +301,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) ...@@ -280,12 +301,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
if (regs->compat_sp & 7) if (regs->compat_sp & 7)
goto badframe; goto badframe;
frame = (struct a32_sigframe __user *)regs->compat_sp; frame = (struct compat_sigframe __user *)regs->compat_sp;
if (!access_ok(frame, sizeof (*frame))) if (!access_ok(frame, sizeof (*frame)))
goto badframe; goto badframe;
if (a32_restore_sigframe(regs, frame)) if (compat_restore_sigframe(regs, frame))
goto badframe; goto badframe;
return regs->regs[0]; return regs->regs[0];
...@@ -298,7 +319,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) ...@@ -298,7 +319,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
COMPAT_SYSCALL_DEFINE0(rt_sigreturn) COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{ {
struct pt_regs *regs = current_pt_regs(); struct pt_regs *regs = current_pt_regs();
struct a32_rt_sigframe __user *frame; struct compat_rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */ /* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall; current->restart_block.fn = do_no_restart_syscall;
...@@ -311,12 +332,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) ...@@ -311,12 +332,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
if (regs->compat_sp & 7) if (regs->compat_sp & 7)
goto badframe; goto badframe;
frame = (struct a32_rt_sigframe __user *)regs->compat_sp; frame = (struct compat_rt_sigframe __user *)regs->compat_sp;
if (!access_ok(frame, sizeof (*frame))) if (!access_ok(frame, sizeof (*frame)))
goto badframe; goto badframe;
if (a32_restore_sigframe(regs, &frame->sig)) if (compat_restore_sigframe(regs, &frame->sig))
goto badframe; goto badframe;
if (compat_restore_altstack(&frame->sig.uc.uc_stack)) if (compat_restore_altstack(&frame->sig.uc.uc_stack))
...@@ -329,7 +350,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) ...@@ -329,7 +350,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
return 0; return 0;
} }
static void __user *a32_get_sigframe(struct ksignal *ksig, static void __user *compat_get_sigframe(struct ksignal *ksig,
struct pt_regs *regs, struct pt_regs *regs,
int framesize) int framesize)
{ {
...@@ -350,7 +371,7 @@ static void __user *a32_get_sigframe(struct ksignal *ksig, ...@@ -350,7 +371,7 @@ static void __user *a32_get_sigframe(struct ksignal *ksig,
return frame; return frame;
} }
static void a32_setup_return(struct pt_regs *regs, struct k_sigaction *ka, static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
compat_ulong_t __user *rc, void __user *frame, compat_ulong_t __user *rc, void __user *frame,
int usig) int usig)
{ {
...@@ -394,10 +415,10 @@ static void a32_setup_return(struct pt_regs *regs, struct k_sigaction *ka, ...@@ -394,10 +415,10 @@ static void a32_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
regs->pstate = spsr; regs->pstate = spsr;
} }
static int a32_setup_sigframe(struct a32_sigframe __user *sf, static int compat_setup_sigframe(struct compat_sigframe __user *sf,
struct pt_regs *regs, sigset_t *set) struct pt_regs *regs, sigset_t *set)
{ {
struct a32_aux_sigframe __user *aux; struct compat_aux_sigframe __user *aux;
unsigned long psr = pstate_to_compat_psr(regs->pstate); unsigned long psr = pstate_to_compat_psr(regs->pstate);
int err = 0; int err = 0;
...@@ -420,7 +441,7 @@ static int a32_setup_sigframe(struct a32_sigframe __user *sf, ...@@ -420,7 +441,7 @@ static int a32_setup_sigframe(struct a32_sigframe __user *sf,
__put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
/* set the aarch32 FSR WnR */ /* set the compat FSR WnR */
__put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) << __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err); FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
...@@ -428,25 +449,25 @@ static int a32_setup_sigframe(struct a32_sigframe __user *sf, ...@@ -428,25 +449,25 @@ static int a32_setup_sigframe(struct a32_sigframe __user *sf,
err |= put_sigset_t(&sf->uc.uc_sigmask, set); err |= put_sigset_t(&sf->uc.uc_sigmask, set);
aux = (struct a32_aux_sigframe __user *) sf->uc.uc_regspace; aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
if (err == 0) if (err == 0)
err |= a32_preserve_vfp_context(&aux->vfp); err |= compat_preserve_vfp_context(&aux->vfp);
__put_user_error(0, &aux->end_magic, err); __put_user_error(0, &aux->end_magic, err);
return err; return err;
} }
/* /*
* aarch32-bit signal handling routines called from signal.c * 32-bit signal handling routines called from signal.c
*/ */
int a32_setup_rt_frame(int usig, struct ksignal *ksig, int compat_setup_rt_frame(int usig, struct ksignal *ksig,
sigset_t *set, struct pt_regs *regs) sigset_t *set, struct pt_regs *regs)
{ {
struct a32_rt_sigframe __user *frame; struct compat_rt_sigframe __user *frame;
int err = 0; int err = 0;
frame = a32_get_sigframe(ksig, regs, sizeof(*frame)); frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
if (!frame) if (!frame)
return 1; return 1;
...@@ -458,10 +479,10 @@ int a32_setup_rt_frame(int usig, struct ksignal *ksig, ...@@ -458,10 +479,10 @@ int a32_setup_rt_frame(int usig, struct ksignal *ksig,
err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp); err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp);
err |= a32_setup_sigframe(&frame->sig, regs, set); err |= compat_setup_sigframe(&frame->sig, regs, set);
if (err == 0) { if (err == 0) {
a32_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); compat_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig);
regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
} }
...@@ -469,27 +490,27 @@ int a32_setup_rt_frame(int usig, struct ksignal *ksig, ...@@ -469,27 +490,27 @@ int a32_setup_rt_frame(int usig, struct ksignal *ksig,
return err; return err;
} }
int a32_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs) struct pt_regs *regs)
{ {
struct a32_sigframe __user *frame; struct compat_sigframe __user *frame;
int err = 0; int err = 0;
frame = a32_get_sigframe(ksig, regs, sizeof(*frame)); frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
if (!frame) if (!frame)
return 1; return 1;
__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
err |= a32_setup_sigframe(frame, regs, set); err |= compat_setup_sigframe(frame, regs, set);
if (err == 0) if (err == 0)
a32_setup_return(regs, &ksig->ka, frame->retcode, frame, usig); compat_setup_return(regs, &ksig->ka, frame->retcode, frame, usig);
return err; return err;
} }
void a32_setup_restart_syscall(struct pt_regs *regs) void compat_setup_restart_syscall(struct pt_regs *regs)
{ {
regs->regs[7] = __NR_compat_restart_syscall; regs->regs[7] = __NR_compat_restart_syscall;
} }
// SPDX-License-Identifier: GPL-2.0+
/*
* Based on arch/arm/kernel/signal.c
*
* Copyright (C) 1995-2009 Russell King
* Copyright (C) 2012 ARM Ltd.
* Modified by Will Deacon <will.deacon@arm.com>
*/
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <asm/signal32_common.h>
#include <asm/unistd.h>
int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
cset.sig[0] = set->sig[0] & 0xffffffffull;
cset.sig[1] = set->sig[0] >> 32;
return copy_to_user(uset, &cset, sizeof(*uset));
}
int get_sigset_t(sigset_t *set, const compat_sigset_t __user *uset)
{
compat_sigset_t s32;
if (copy_from_user(&s32, uset, sizeof(*uset)))
return -EFAULT;
set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
return 0;
}
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 1995-2009 Russell King
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2018 Cavium Networks.
* Yury Norov <ynorov@caviumnetworks.com>
*/
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <asm/fpsimd.h>
#include <asm/unistd.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/signal_ilp32.h>
#include <asm/signal32_common.h>
#define get_sigset(s, m) get_sigset_t(s, m)
#define put_sigset(s, m) put_sigset_t(m, s)
#define restore_altstack(stack) compat_restore_altstack(stack)
#define __save_altstack(stack, sp) __compat_save_altstack(stack, sp)
#define copy_siginfo_to_user(frame_info, ksig_info) \
copy_siginfo_to_user32(frame_info, ksig_info)
#define setup_return(regs, ka, user_layout, usig) \
{ \
__setup_return(regs, ka, user_layout, usig); \
regs->regs[30] = \
(unsigned long)VDSO_SYMBOL(current->mm->context.vdso, \
sigtramp_ilp32); \
}
struct ilp32_ucontext {
u32 uc_flags;
u32 uc_link;
compat_stack_t uc_stack;
compat_sigset_t uc_sigmask;
/* glibc uses a 1024-bit sigset_t */
__u8 __unused[1024 / 8 - sizeof(compat_sigset_t)];
/* last for future expansion */
struct sigcontext uc_mcontext;
};
struct rt_sigframe {
struct compat_siginfo info;
struct ilp32_ucontext uc;
};
#include <asm/signal_common.h>
COMPAT_SYSCALL_DEFINE0(ilp32_rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
return __sys_rt_sigreturn(regs);
}
int ilp32_setup_rt_frame(int usig, struct ksignal *ksig,
sigset_t *set, struct pt_regs *regs)
{
return __setup_rt_frame(usig, ksig, set, regs);
}
...@@ -31,6 +31,108 @@ ...@@ -31,6 +31,108 @@
asmlinkage long compat_sys_sigreturn(void); asmlinkage long compat_sys_sigreturn(void);
asmlinkage long compat_sys_rt_sigreturn(void); asmlinkage long compat_sys_rt_sigreturn(void);
COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname,
compat_size_t, sz, struct compat_statfs64 __user *, buf)
{
/*
* 32-bit ARM applies an OABI compatibility fixup to statfs64 and
* fstatfs64 regardless of whether OABI is in use, and therefore
* arbitrary binaries may rely upon it, so we must do the same.
* For more details, see commit:
*
* 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and
* fstatfs64")
*/
if (sz == 88)
sz = 84;
return kcompat_sys_statfs64(pathname, sz, buf);
}
COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz,
struct compat_statfs64 __user *, buf)
{
/* see aarch32_statfs64 */
if (sz == 88)
sz = 84;
return kcompat_sys_fstatfs64(fd, sz, buf);
}
/*
* Note: off_4k is always in units of 4K. If we can't do the
* requested offset because it is not page-aligned, we return -EINVAL.
*/
COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, off_4k)
{
if (off_4k & (~PAGE_MASK >> 12))
return -EINVAL;
off_4k >>= (PAGE_SHIFT - 12);
return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k);
}
#ifdef CONFIG_CPU_BIG_ENDIAN
#define arg_u32p(name) u32, name##_hi, u32, name##_lo
#else
#define arg_u32p(name) u32, name##_lo, u32, name##_hi
#endif
#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo)
COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf,
size_t, count, u32, __pad, arg_u32p(pos))
{
return ksys_pread64(fd, buf, count, arg_u64(pos));
}
COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd,
const char __user *, buf, size_t, count, u32, __pad,
arg_u32p(pos))
{
return ksys_pwrite64(fd, buf, count, arg_u64(pos));
}
COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname,
u32, __pad, arg_u32p(length))
{
return ksys_truncate(pathname, arg_u64(length));
}
COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad,
arg_u32p(length))
{
return ksys_ftruncate(fd, arg_u64(length));
}
COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad,
arg_u32p(offset), size_t, count)
{
return ksys_readahead(fd, arg_u64(offset), count);
}
COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice,
arg_u32p(offset), arg_u32p(len))
{
return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice);
}
COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags,
arg_u32p(offset), arg_u32p(nbytes))
{
return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes),
flags);
}
COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
arg_u32p(offset), arg_u32p(len))
{
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
#undef __SYSCALL #undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h> #include <asm/unistd32.h>
...@@ -38,7 +140,7 @@ asmlinkage long compat_sys_rt_sigreturn(void); ...@@ -38,7 +140,7 @@ asmlinkage long compat_sys_rt_sigreturn(void);
#undef __SYSCALL #undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, #define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t a32_sys_call_table[__NR_compat_syscalls] = { const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
[0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall, [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd32.h> #include <asm/unistd32.h>
}; };
// SPDX-License-Identifier: GPL-2.0+
#include <linux/compat.h>
#include <linux/syscalls.h>
COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname,
compat_size_t, sz, struct compat_statfs64 __user *, buf)
{
/*
* 32-bit ARM applies an OABI compatibility fixup to statfs64 and
* fstatfs64 regardless of whether OABI is in use, and therefore
* arbitrary binaries may rely upon it, so we must do the same.
* For more details, see commit:
*
* 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and
* fstatfs64")
*/
if (sz == 88)
sz = 84;
return kcompat_sys_statfs64(pathname, sz, buf);
}
COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz,
struct compat_statfs64 __user *, buf)
{
/* see aarch32_statfs64 */
if (sz == 88)
sz = 84;
return kcompat_sys_fstatfs64(fd, sz, buf);
}
/*
* Note: off_4k is always in units of 4K. If we can't do the
* requested offset because it is not page-aligned, we return -EINVAL.
*/
COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, off_4k)
{
if (off_4k & (~PAGE_MASK >> 12))
return -EINVAL;
off_4k >>= (PAGE_SHIFT - 12);
return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k);
}
#ifdef CONFIG_CPU_BIG_ENDIAN
#define arg_u32p(name) u32, name##_hi, u32, name##_lo
#else
#define arg_u32p(name) u32, name##_lo, u32, name##_hi
#endif
#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo)
COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf,
size_t, count, u32, __pad, arg_u32p(pos))
{
return ksys_pread64(fd, buf, count, arg_u64(pos));
}
COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd,
const char __user *, buf, size_t, count, u32, __pad,
arg_u32p(pos))
{
return ksys_pwrite64(fd, buf, count, arg_u64(pos));
}
COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname,
u32, __pad, arg_u32p(length))
{
return ksys_truncate(pathname, arg_u64(length));
}
COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad,
arg_u32p(length))
{
return ksys_ftruncate(fd, arg_u64(length));
}
COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad,
arg_u32p(offset), size_t, count)
{
return ksys_readahead(fd, arg_u64(offset), count);
}
COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice,
arg_u32p(offset), arg_u32p(len))
{
return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice);
}
COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags,
arg_u32p(offset), arg_u32p(nbytes))
{
return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes),
flags);
}
COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
arg_u32p(offset), arg_u32p(len))
{
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#include <asm/unistd.h> #include <asm/unistd.h>
static long static long
__do_a32_cache_op(unsigned long start, unsigned long end) __do_compat_cache_op(unsigned long start, unsigned long end)
{ {
long ret; long ret;
...@@ -64,7 +64,7 @@ __do_a32_cache_op(unsigned long start, unsigned long end) ...@@ -64,7 +64,7 @@ __do_a32_cache_op(unsigned long start, unsigned long end)
} }
static inline long static inline long
do_a32_cache_op(unsigned long start, unsigned long end, int flags) do_compat_cache_op(unsigned long start, unsigned long end, int flags)
{ {
if (end < start || flags) if (end < start || flags)
return -EINVAL; return -EINVAL;
...@@ -72,12 +72,12 @@ do_a32_cache_op(unsigned long start, unsigned long end, int flags) ...@@ -72,12 +72,12 @@ do_a32_cache_op(unsigned long start, unsigned long end, int flags)
if (!access_ok((const void __user *)start, end - start)) if (!access_ok((const void __user *)start, end - start))
return -EFAULT; return -EFAULT;
return __do_a32_cache_op(start, end); return __do_compat_cache_op(start, end);
} }
/* /*
* Handle all unrecognised system calls. * Handle all unrecognised system calls.
*/ */
long a32_arm_syscall(struct pt_regs *regs, int scno) long compat_arm_syscall(struct pt_regs *regs, int scno)
{ {
siginfo_t info; siginfo_t info;
...@@ -97,7 +97,7 @@ long a32_arm_syscall(struct pt_regs *regs, int scno) ...@@ -97,7 +97,7 @@ long a32_arm_syscall(struct pt_regs *regs, int scno)
* the specified region). * the specified region).
*/ */
case __ARM_NR_compat_cacheflush: case __ARM_NR_compat_cacheflush:
return do_a32_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
case __ARM_NR_compat_set_tls: case __ARM_NR_compat_set_tls:
current->thread.uw.tp_value = regs->regs[0]; current->thread.uw.tp_value = regs->regs[0];
...@@ -127,7 +127,7 @@ long a32_arm_syscall(struct pt_regs *regs, int scno) ...@@ -127,7 +127,7 @@ long a32_arm_syscall(struct pt_regs *regs, int scno)
info.si_errno = 0; info.si_errno = 0;
info.si_code = ILL_ILLTRP; info.si_code = ILL_ILLTRP;
info.si_addr = (void __user *)instruction_pointer(regs) - info.si_addr = (void __user *)instruction_pointer(regs) -
(a32_thumb_mode(regs) ? 2 : 4); (compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno); arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno);
return 0; return 0;
......
// SPDX-License-Identifier: GPL-2.0+
/*
* AArch64- ILP32 specific system calls implementation
* Copyright (C) 2018 Marvell.
*/
#define __SYSCALL_COMPAT
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <asm/syscall.h>
/*
* AARCH32 requires 4-page alignment for shared memory,
* but AARCH64 - only 1 page. This is the only difference
* between compat and native sys_shmat(). So ILP32 just pick
* AARCH64 version.
*/
#define __arm64_compat_sys_shmat __arm64_sys_shmat
/*
* ILP32 needs special handling for some ptrace requests.
*/
#define __arm64_sys_ptrace __arm64_compat_sys_ptrace
/*
* Using AARCH32 interface for syscalls that take 64-bit
* parameters in registers.
*/
#define __arm64_compat_sys_fadvise64_64 __arm64_compat_sys_aarch32_fadvise64_64
#define __arm64_compat_sys_fallocate __arm64_compat_sys_aarch32_fallocate
#define __arm64_compat_sys_ftruncate64 __arm64_compat_sys_aarch32_ftruncate64
#define __arm64_compat_sys_pread64 __arm64_compat_sys_aarch32_pread64
#define __arm64_compat_sys_pwrite64 __arm64_compat_sys_aarch32_pwrite64
#define __arm64_compat_sys_readahead __arm64_compat_sys_aarch32_readahead
#define __arm64_compat_sys_sync_file_range2 __arm64_compat_sys_aarch32_sync_file_range2
#define __arm64_compat_sys_truncate64 __arm64_compat_sys_aarch32_truncate64
#define __arm64_sys_mmap2 __arm64_compat_sys_aarch32_mmap2
/*
* Using AARCH32 interface for syscalls that take the size of
* struct statfs as an argument, as it's calculated differently
* in kernel and user spaces.
*/
#define __arm64_compat_sys_fstatfs64 __arm64_compat_sys_aarch32_fstatfs64
#define __arm64_compat_sys_statfs64 __arm64_compat_sys_aarch32_statfs64
/*
* Using custom wrapper for rt_sigreturn() to handle custom
* struct rt_sigframe.
*/
#define __arm64_compat_sys_rt_sigreturn __arm64_compat_sys_ilp32_rt_sigreturn
/*
* Wrappers to pass the pt_regs argument.
*/
#define sys_personality sys_arm64_personality
asmlinkage long sys_ni_syscall(const struct pt_regs *);
#define __arm64_sys_ni_syscall sys_ni_syscall
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
const syscall_fn_t ilp32_sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
#include <asm/unistd.h>
};
...@@ -14,15 +14,15 @@ ...@@ -14,15 +14,15 @@
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/unistd.h> #include <asm/unistd.h>
long a32_arm_syscall(struct pt_regs *regs, int scno); long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void); long sys_ni_syscall(void);
static long do_ni_syscall(struct pt_regs *regs, int scno) static long do_ni_syscall(struct pt_regs *regs, int scno)
{ {
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
long ret; long ret;
if (is_a32_compat_task()) { if (is_compat_task()) {
ret = a32_arm_syscall(regs, scno); ret = compat_arm_syscall(regs, scno);
if (ret != -ENOSYS) if (ret != -ENOSYS)
return ret; return ret;
} }
...@@ -157,39 +157,16 @@ static inline void sve_user_discard(void) ...@@ -157,39 +157,16 @@ static inline void sve_user_discard(void)
sve_user_disable(); sve_user_disable();
} }
#ifdef CONFIG_ARM64_ILP32
static inline void delouse_pt_regs(struct pt_regs *regs)
{
regs->regs[0] &= UINT_MAX;
regs->regs[1] &= UINT_MAX;
regs->regs[2] &= UINT_MAX;
regs->regs[3] &= UINT_MAX;
regs->regs[4] &= UINT_MAX;
regs->regs[5] &= UINT_MAX;
regs->regs[6] &= UINT_MAX;
regs->regs[7] &= UINT_MAX;
}
#endif
asmlinkage void el0_svc_handler(struct pt_regs *regs) asmlinkage void el0_svc_handler(struct pt_regs *regs)
{ {
const syscall_fn_t *t = sys_call_table;
#ifdef CONFIG_ARM64_ILP32
if (is_ilp32_compat_task()) {
t = ilp32_sys_call_table;
delouse_pt_regs(regs);
}
#endif
sve_user_discard(); sve_user_discard();
el0_svc_common(regs, regs->regs[8], __NR_syscalls, t); el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
} }
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
asmlinkage void el0_svc_compat_handler(struct pt_regs *regs) asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
{ {
el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls, el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
a32_sys_call_table); compat_sys_call_table);
} }
#endif #endif
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
*/ */
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/compat.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/personality.h> #include <linux/personality.h>
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
...@@ -319,7 +318,7 @@ static int call_undef_hook(struct pt_regs *regs) ...@@ -319,7 +318,7 @@ static int call_undef_hook(struct pt_regs *regs)
if (probe_kernel_address((__force __le32 *)pc, instr_le)) if (probe_kernel_address((__force __le32 *)pc, instr_le))
goto exit; goto exit;
instr = le32_to_cpu(instr_le); instr = le32_to_cpu(instr_le);
} else if (a32_thumb_mode(regs)) { } else if (compat_thumb_mode(regs)) {
/* 16-bit Thumb instruction */ /* 16-bit Thumb instruction */
__le16 instr_le; __le16 instr_le;
if (get_user(instr_le, (__le16 __user *)pc)) if (get_user(instr_le, (__le16 __user *)pc))
......
# SPDX-License-Identifier: GPL-2.0+
#
# Building a vDSO image for AArch64.
#
# Author: Will Deacon <will.deacon@arm.com>
# Heavily based on the vDSO Makefiles for other archs.
#
obj-ilp32-vdso := gettimeofday-ilp32.o note-ilp32.o sigreturn-ilp32.o
# Build rules
targets := $(obj-ilp32-vdso) vdso-ilp32.so vdso-ilp32.so.dbg
obj-ilp32-vdso := $(addprefix $(obj)/, $(obj-ilp32-vdso))
ccflags-y := -shared -fno-common -fno-builtin -fno-stack-protector
ccflags-y += -DDISABLE_BRANCH_PROFILING
ccflags-y += -nostdlib -Wl,-soname=linux-ilp32-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
# Force -O2 to avoid libgcc dependencies
CFLAGS_REMOVE_gettimeofday-ilp32.o = -pg -Os
CFLAGS_gettimeofday-ilp32.o = -O2 -mcmodel=tiny -mabi=ilp32
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
# down to collect2, resulting in silent corruption of the vDSO image.
ccflags-y += -Wl,-shared
obj-y += vdso-ilp32.o
extra-y += vdso-ilp32.lds
CPPFLAGS_vdso-ilp32.lds += -P -C -U$(ARCH) -mabi=ilp32
# Force dependency (incbin is bad)
$(obj)/vdso-ilp32.o : $(obj)/vdso-ilp32.so
# Link rule for the .so file, .lds has to be first
$(obj)/vdso-ilp32.so.dbg: $(src)/vdso-ilp32.lds $(obj-ilp32-vdso)
$(call if_changed,vdso-ilp32ld)
# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
define cmd_vdsosym
$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
endef
include/generated/vdso-ilp32-offsets.h: $(obj)/vdso-ilp32.so.dbg FORCE
$(call if_changed,vdsosym)
# Assembly rules for the .S files
#$(obj-ilp32-vdso): %.o: $(src)/../vdso/$(subst -ilp32,,%.S)
# $(call if_changed_dep,vdso-ilp32as)
$(obj)/gettimeofday-ilp32.o: $(src)/../vdso/gettimeofday.c
$(call if_changed_dep,vdso-ilp32cc)
$(obj)/note-ilp32.o: $(src)/../vdso/note.S
$(call if_changed_dep,vdso-ilp32as)
# This one should be fine because ILP32 uses the same generic
# __NR_rt_sigreturn syscall number.
$(obj)/sigreturn-ilp32.o: $(src)/../vdso/sigreturn.S
$(call if_changed_dep,vdso-ilp32as)
# Actual build commands
quiet_cmd_vdso-ilp32ld = VDSOILP32L $@
cmd_vdso-ilp32ld = $(CC) $(c_flags) -mabi=ilp32 -Wl,-n -Wl,-T $^ -o $@
quiet_cmd_vdso-ilp32as = VDSOILP32C $@
cmd_vdso-ilp32cc= $(CC) $(c_flags) -mabi=ilp32 -c -o $@ $<
quiet_cmd_vdso-ilp32as = VDSOILP32A $@
cmd_vdso-ilp32as = $(CC) $(a_flags) -mabi=ilp32 -c -o $@ $<
# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
vdso-ilp32.so: $(obj)/vdso-ilp32.so.dbg
@mkdir -p $(MODLIB)/vdso
$(call cmd,vdso_install)
vdso_install: vdso-ilp32.so
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <will.deacon@arm.com>
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/page.h>
__PAGE_ALIGNED_DATA
.globl vdso_ilp32_start, vdso_ilp32_end
.balign PAGE_SIZE
vdso_ilp32_start:
.incbin "arch/arm64/kernel/vdso-ilp32/vdso-ilp32.so"
.balign PAGE_SIZE
vdso_ilp32_end:
.previous
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* GNU linker script for the VDSO library.
*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <will.deacon@arm.com>
* Heavily based on the vDSO linker scripts for other archs.
*/
#include <linux/const.h>
#include <asm/page.h>
#include <asm/vdso.h>
SECTIONS
{
PROVIDE(_vdso_data = . - PAGE_SIZE);
. = VDSO_LBASE + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
. = ALIGN(16);
.text : { *(.text*) } :text =0xd503201f
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : { *(.rodata*) } :text
_end = .;
PROVIDE(end = .);
/DISCARD/ : {
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
}
}
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
LINUX_4.12 {
global:
__kernel_rt_sigreturn;
__kernel_gettimeofday;
__kernel_clock_gettime;
__kernel_clock_getres;
local: *;
};
}
/*
* Make the sigreturn code visible to the kernel.
*/
VDSO_sigtramp_ilp32 = __kernel_rt_sigreturn;
...@@ -37,13 +37,8 @@ ...@@ -37,13 +37,8 @@
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/vdso_datapage.h> #include <asm/vdso_datapage.h>
extern char vdso_lp64_start[], vdso_lp64_end[]; extern char vdso_start[], vdso_end[];
static unsigned long vdso_lp64_pages __ro_after_init; static unsigned long vdso_pages __ro_after_init;
#ifdef CONFIG_ARM64_ILP32
extern char vdso_ilp32_start[], vdso_ilp32_end[];
static unsigned long vdso_ilp32_pages __ro_after_init;
#endif
/* /*
* The vDSO data page. * The vDSO data page.
...@@ -54,7 +49,7 @@ static union { ...@@ -54,7 +49,7 @@ static union {
} vdso_data_store __page_aligned_data; } vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data; struct vdso_data *vdso_data = &vdso_data_store.data;
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
/* /*
* Create and map the vectors page for AArch32 tasks. * Create and map the vectors page for AArch32 tasks.
*/ */
...@@ -113,13 +108,13 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) ...@@ -113,13 +108,13 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
return PTR_ERR_OR_ZERO(ret); return PTR_ERR_OR_ZERO(ret);
} }
#endif /* CONFIG_AARCH32_EL0 */ #endif /* CONFIG_COMPAT */
static int vdso_mremap(const struct vm_special_mapping *sm, static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma) struct vm_area_struct *new_vma)
{ {
unsigned long new_size = new_vma->vm_end - new_vma->vm_start; unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
unsigned long vdso_size = vdso_lp64_end - vdso_lp64_start; unsigned long vdso_size = vdso_end - vdso_start;
if (vdso_size != new_size) if (vdso_size != new_size)
return -EINVAL; return -EINVAL;
...@@ -129,7 +124,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, ...@@ -129,7 +124,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0; return 0;
} }
static struct vm_special_mapping vdso_lp64_spec[2] __ro_after_init = { static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
{ {
.name = "[vvar]", .name = "[vvar]",
}, },
...@@ -139,23 +134,9 @@ static struct vm_special_mapping vdso_lp64_spec[2] __ro_after_init = { ...@@ -139,23 +134,9 @@ static struct vm_special_mapping vdso_lp64_spec[2] __ro_after_init = {
}, },
}; };
#ifdef CONFIG_ARM64_ILP32 static int __init vdso_init(void)
static struct vm_special_mapping vdso_ilp32_spec[2] __ro_after_init = {
{
.name = "[vvar]",
},
{
.name = "[vdso]",
},
};
#endif
static int __init vdso_init(char *vdso_start, char *vdso_end,
unsigned long *vdso_pagesp,
struct vm_special_mapping *vdso_spec)
{ {
int i; int i;
unsigned long vdso_pages;
struct page **vdso_pagelist; struct page **vdso_pagelist;
unsigned long pfn; unsigned long pfn;
...@@ -165,7 +146,6 @@ static int __init vdso_init(char *vdso_start, char *vdso_end, ...@@ -165,7 +146,6 @@ static int __init vdso_init(char *vdso_start, char *vdso_end,
} }
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
*vdso_pagesp = vdso_pages;
/* Allocate the vDSO pagelist, plus a page for the data. */ /* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
...@@ -188,22 +168,7 @@ static int __init vdso_init(char *vdso_start, char *vdso_end, ...@@ -188,22 +168,7 @@ static int __init vdso_init(char *vdso_start, char *vdso_end,
return 0; return 0;
} }
arch_initcall(vdso_init);
static int __init vdso_lp64_init(void)
{
return vdso_init(vdso_lp64_start, vdso_lp64_end,
&vdso_lp64_pages, vdso_lp64_spec);
}
arch_initcall(vdso_lp64_init);
#ifdef CONFIG_ARM64_ILP32
static int __init vdso_ilp32_init(void)
{
return vdso_init(vdso_ilp32_start, vdso_ilp32_end,
&vdso_ilp32_pages, vdso_ilp32_spec);
}
arch_initcall(vdso_ilp32_init);
#endif
int arch_setup_additional_pages(struct linux_binprm *bprm, int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp) int uses_interp)
...@@ -211,17 +176,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, ...@@ -211,17 +176,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_text_len, vdso_mapping_len; unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
void *ret; void *ret;
unsigned long pages = vdso_lp64_pages;
struct vm_special_mapping *vdso_spec = vdso_lp64_spec;
#ifdef CONFIG_ARM64_ILP32
if (is_ilp32_compat_task()) {
pages = vdso_ilp32_pages;
vdso_spec = vdso_ilp32_spec;
}
#endif
vdso_text_len = pages << PAGE_SHIFT; vdso_text_len = vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */ /* Be sure to map the data page */
vdso_mapping_len = vdso_text_len + PAGE_SIZE; vdso_mapping_len = vdso_text_len + PAGE_SIZE;
......
...@@ -26,12 +26,6 @@ ...@@ -26,12 +26,6 @@
#include <linux/math64.h> #include <linux/math64.h>
#include <linux/time.h> #include <linux/time.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#ifdef __ILP32__
#undef BITS_PER_LONG
#define BITS_PER_LONG 32
#endif
#include <linux/hrtimer.h> #include <linux/hrtimer.h>
extern struct vdso_data _vdso_data; extern struct vdso_data _vdso_data;
......
...@@ -21,12 +21,12 @@ ...@@ -21,12 +21,12 @@
#include <linux/const.h> #include <linux/const.h>
#include <asm/page.h> #include <asm/page.h>
.globl vdso_lp64_start, vdso_lp64_end .globl vdso_start, vdso_end
.section .rodata .section .rodata
.balign PAGE_SIZE .balign PAGE_SIZE
vdso_lp64_start: vdso_start:
.incbin "arch/arm64/kernel/vdso/vdso.so" .incbin "arch/arm64/kernel/vdso/vdso.so"
.balign PAGE_SIZE .balign PAGE_SIZE
vdso_lp64_end: vdso_end:
.previous .previous
...@@ -54,7 +54,7 @@ unsigned long arch_mmap_rnd(void) ...@@ -54,7 +54,7 @@ unsigned long arch_mmap_rnd(void)
unsigned long rnd; unsigned long rnd;
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
if (is_compat_task()) if (test_thread_flag(TIF_32BIT))
rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
else else
#endif #endif
......
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
*/ */
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE
/* Use the standard ABI for syscalls. */ /* Use the standard ABI for syscalls. */
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
config H8300 config H8300
def_bool y def_bool y
select ARCH_32BIT_OFF_T
select GENERIC_ATOMIC64 select GENERIC_ATOMIC64
select HAVE_UID16 select HAVE_UID16
select VIRT_TO_BUS select VIRT_TO_BUS
......
#define __ARCH_NOMMU #define __ARCH_NOMMU
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
#include <asm-generic/unistd.h> #include <asm-generic/unistd.h>
...@@ -4,7 +4,6 @@ comment "Linux Kernel Configuration for Hexagon" ...@@ -4,7 +4,6 @@ comment "Linux Kernel Configuration for Hexagon"
config HEXAGON config HEXAGON
def_bool y def_bool y
select ARCH_32BIT_OFF_T
select ARCH_NO_PREEMPT select ARCH_NO_PREEMPT
select HAVE_OPROFILE select HAVE_OPROFILE
# Other pending projects/to-do items. # Other pending projects/to-do items.
......
...@@ -29,7 +29,6 @@ ...@@ -29,7 +29,6 @@
#define sys_mmap2 sys_mmap_pgoff #define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_VFORK
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
config M68K config M68K
bool bool
default y default y
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select ARCH_NO_COHERENT_DMA_MMAP if !MMU select ARCH_NO_COHERENT_DMA_MMAP if !MMU
......
config MICROBLAZE config MICROBLAZE
def_bool y def_bool y
select ARCH_32BIT_OFF_T
select ARCH_NO_SWAP select ARCH_NO_SWAP
select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_CPU
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
config MIPS config MIPS
bool bool
default y default y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE select ARCH_BINFMT_ELF_STATE
select ARCH_CLOCKSOURCE_DATA select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK select ARCH_DISCARD_MEMBLOCK
......
...@@ -31,7 +31,6 @@ config NDS32 ...@@ -31,7 +31,6 @@ config NDS32
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_MEMBLOCK select HAVE_MEMBLOCK
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
select ARCH_32BIT_OFF_T
select IRQ_DOMAIN select IRQ_DOMAIN
select LOCKDEP_SUPPORT select LOCKDEP_SUPPORT
select MODULES_USE_ELF_RELA select MODULES_USE_ELF_RELA
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
// Copyright (C) 2005-2017 Andes Technology Corporation // Copyright (C) 2005-2017 Andes Technology Corporation
#define __ARCH_WANT_SYNC_FILE_RANGE2 #define __ARCH_WANT_SYNC_FILE_RANGE2
#define __ARCH_WANT_SET_GET_RLIMIT
/* Use the standard ABI for syscalls */ /* Use the standard ABI for syscalls */
#include <asm-generic/unistd.h> #include <asm-generic/unistd.h>
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
config NIOS2 config NIOS2
def_bool y def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_SWAP select ARCH_NO_SWAP
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#define sys_mmap2 sys_mmap_pgoff #define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
/* Use the standard ABI for syscalls */ /* Use the standard ABI for syscalls */
#include <asm-generic/unistd.h> #include <asm-generic/unistd.h>
......
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
config OPENRISC config OPENRISC
def_bool y def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_NONCOHERENT_OPS select DMA_NONCOHERENT_OPS
select OF select OF
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#define sys_mmap2 sys_mmap_pgoff #define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
config PARISC config PARISC
def_bool y def_bool y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE select HAVE_IDE
select HAVE_OPROFILE select HAVE_OPROFILE
......
...@@ -128,7 +128,6 @@ config PPC ...@@ -128,7 +128,6 @@ config PPC
# #
# Please keep this list sorted alphabetically. # Please keep this list sorted alphabetically.
# #
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_SET_COHERENT_MASK select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_ELF_RANDOMIZE
......
...@@ -11,7 +11,6 @@ config 32BIT ...@@ -11,7 +11,6 @@ config 32BIT
config RISCV config RISCV
def_bool y def_bool y
select ARCH_32BIT_OFF_T if !64BIT
# even on 32-bit, physical (and DMA) addresses are > 32-bits # even on 32-bit, physical (and DMA) addresses are > 32-bits
select PHYS_ADDR_T_64BIT select PHYS_ADDR_T_64BIT
select OF select OF
......
...@@ -17,6 +17,5 @@ ...@@ -17,6 +17,5 @@
*/ */
#define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SET_GET_RLIMIT
#include <uapi/asm/unistd.h> #include <uapi/asm/unistd.h>
#include <uapi/asm/syscalls.h> #include <uapi/asm/syscalls.h>
...@@ -61,7 +61,6 @@ config SUPERH ...@@ -61,7 +61,6 @@ config SUPERH
config SUPERH32 config SUPERH32
def_bool "$(ARCH)" = "sh" def_bool "$(ARCH)" = "sh"
select ARCH_32BIT_OFF_T
select HAVE_KPROBES select HAVE_KPROBES
select HAVE_KRETPROBES select HAVE_KRETPROBES
select HAVE_IOREMAP_PROT if MMU && !X2TLB select HAVE_IOREMAP_PROT if MMU && !X2TLB
......
...@@ -50,7 +50,6 @@ config SPARC ...@@ -50,7 +50,6 @@ config SPARC
config SPARC32 config SPARC32
def_bool !64BIT def_bool !64BIT
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_CPU
select DMA_NONCOHERENT_OPS select DMA_NONCOHERENT_OPS
select GENERIC_ATOMIC64 select GENERIC_ATOMIC64
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
config UNICORE32 config UNICORE32
def_bool y def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_MIGHT_HAVE_PC_SERIO
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
*/ */
#define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SET_GET_RLIMIT
/* Use the standard ABI for syscalls. */ /* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h> #include <asm-generic/unistd.h>
......
...@@ -46,7 +46,6 @@ config X86 ...@@ -46,7 +46,6 @@ config X86
select ACPI_LEGACY_TABLES_LOOKUP if ACPI select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ANON_INODES select ANON_INODES
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_DATA select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
......
...@@ -16,7 +16,6 @@ config 64BIT ...@@ -16,7 +16,6 @@ config 64BIT
config X86_32 config X86_32
def_bool !64BIT def_bool !64BIT
select ARCH_32BIT_OFF_T
select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_IPC_PARSE_VERSION
select MODULES_USE_ELF_REL select MODULES_USE_ELF_REL
select CLONE_BACKWARDS select CLONE_BACKWARDS
......
...@@ -4,7 +4,6 @@ config ZONE_DMA ...@@ -4,7 +4,6 @@ config ZONE_DMA
config XTENSA config XTENSA
def_bool y def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_SG_CHAIN select ARCH_HAS_SG_CHAIN
select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_DEVICE
......
...@@ -821,8 +821,8 @@ static void arch_timer_evtstrm_enable(int divider) ...@@ -821,8 +821,8 @@ static void arch_timer_evtstrm_enable(int divider)
| ARCH_TIMER_VIRT_EVT_EN; | ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl); arch_timer_set_cntkctl(cntkctl);
elf_hwcap |= HWCAP_EVTSTRM; elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_AARCH32_EL0 #ifdef CONFIG_COMPAT
a32_elf_hwcap |= COMPAT_HWCAP_EVTSTRM; compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif #endif
cpumask_set_cpu(smp_processor_id(), &evtstrm_available); cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
} }
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile #ifndef force_o_largefile
#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T)) #define force_o_largefile() (BITS_PER_LONG != 32)
#endif #endif
#if BITS_PER_LONG == 32 #if BITS_PER_LONG == 32
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#include <linux/mm_types_task.h> #include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h> #include <linux/task_io_accounting.h>
#include <linux/rseq.h> #include <linux/rseq.h>
#include <linux/thread_bits.h>
/* task_struct member predeclarations (sorted alphabetically): */ /* task_struct member predeclarations (sorted alphabetically): */
struct audit_context; struct audit_context;
......
/* SPDX-License-Identifier: GPL-2.0+ */
/* Common low-level thread bits accessors */
#ifndef _LINUX_THREAD_BITS_H
#define _LINUX_THREAD_BITS_H
#ifndef __ASSEMBLY__
/*
* For per-arch arch_within_stack_frames() implementations, defined in
* asm/thread_info.h.
*/
enum {
BAD_STACK = -1,
NOT_STACK = 0,
GOOD_FRAME,
GOOD_STACK,
};
#include <linux/bitops.h>
#include <asm/thread_info.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
* For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
* definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
* including <asm/current.h> can cause a circular dependency on some platforms.
*/
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif
/*
* flag set/clear/test wrappers
* - pass TIF_xxxx constants to these functions
*/
static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag, (unsigned long *)&ti->flags);
}
static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
bool value)
{
if (value)
set_ti_thread_flag(ti, flag);
else
clear_ti_thread_flag(ti, flag);
}
static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}
static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_bit(flag, (unsigned long *)&ti->flags);
}
#define set_thread_flag(flag) \
set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_THREAD_BITS_H */
...@@ -11,9 +11,30 @@ ...@@ -11,9 +11,30 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/restart_block.h> #include <linux/restart_block.h>
#include <linux/thread_bits.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
* For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
* definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
* including <asm/current.h> can cause a circular dependency on some platforms.
*/
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif
#include <linux/bitops.h> #include <linux/bitops.h>
/*
* For per-arch arch_within_stack_frames() implementations, defined in
* asm/thread_info.h.
*/
enum {
BAD_STACK = -1,
NOT_STACK = 0,
GOOD_FRAME,
GOOD_STACK,
};
#include <asm/thread_info.h> #include <asm/thread_info.h>
#ifdef __KERNEL__ #ifdef __KERNEL__
...@@ -24,6 +45,58 @@ ...@@ -24,6 +45,58 @@
#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) #define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
/*
* flag set/clear/test wrappers
* - pass TIF_xxxx constants to these functions
*/
static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag, (unsigned long *)&ti->flags);
}
static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
bool value)
{
if (value)
set_ti_thread_flag(ti, flag);
else
clear_ti_thread_flag(ti, flag);
}
static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}
static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_bit(flag, (unsigned long *)&ti->flags);
}
#define set_thread_flag(flag) \
set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
......
...@@ -179,7 +179,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat) ...@@ -179,7 +179,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
#define __NR_fchown 55 #define __NR_fchown 55
__SYSCALL(__NR_fchown, sys_fchown) __SYSCALL(__NR_fchown, sys_fchown)
#define __NR_openat 56 #define __NR_openat 56
__SYSCALL(__NR_openat, sys_openat) __SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
#define __NR_close 57 #define __NR_close 57
__SYSCALL(__NR_close, sys_close) __SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58 #define __NR_vhangup 58
...@@ -465,15 +465,10 @@ __SYSCALL(__NR_uname, sys_newuname) ...@@ -465,15 +465,10 @@ __SYSCALL(__NR_uname, sys_newuname)
__SYSCALL(__NR_sethostname, sys_sethostname) __SYSCALL(__NR_sethostname, sys_sethostname)
#define __NR_setdomainname 162 #define __NR_setdomainname 162
__SYSCALL(__NR_setdomainname, sys_setdomainname) __SYSCALL(__NR_setdomainname, sys_setdomainname)
#ifdef __ARCH_WANT_SET_GET_RLIMIT
/* getrlimit and setrlimit are superseded with prlimit64 */
#define __NR_getrlimit 163 #define __NR_getrlimit 163
__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit) __SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
#define __NR_setrlimit 164 #define __NR_setrlimit 164
__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit) __SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
#endif
#define __NR_getrusage 165 #define __NR_getrusage 165
__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage) __SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
#define __NR_umask 166 #define __NR_umask 166
...@@ -681,7 +676,8 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) ...@@ -681,7 +676,8 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#define __NR_name_to_handle_at 264 #define __NR_name_to_handle_at 264
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265 #define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) __SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
compat_sys_open_by_handle_at)
#define __NR_clock_adjtime 266 #define __NR_clock_adjtime 266
__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime) __SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
#define __NR_syncfs 267 #define __NR_syncfs 267
......
...@@ -900,22 +900,6 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type, ...@@ -900,22 +900,6 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
EXPORT_SYMBOL_GPL(task_user_regset_view); EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif #endif
static int ptrace_setsigmask(struct task_struct *child, sigset_t *new_set)
{
sigdelsetmask(new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
/*
* Every thread does recalc_sigpending() after resume, so
* retarget_shared_pending() and recalc_sigpending() are not
* called here.
*/
spin_lock_irq(&child->sighand->siglock);
child->blocked = *new_set;
spin_unlock_irq(&child->sighand->siglock);
return 0;
}
int ptrace_request(struct task_struct *child, long request, int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data) unsigned long addr, unsigned long data)
{ {
...@@ -995,10 +979,20 @@ int ptrace_request(struct task_struct *child, long request, ...@@ -995,10 +979,20 @@ int ptrace_request(struct task_struct *child, long request,
break; break;
} }
ret = ptrace_setsigmask(child, &new_set); sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
/*
* Every thread does recalc_sigpending() after resume, so
* retarget_shared_pending() and recalc_sigpending() are not
* called here.
*/
spin_lock_irq(&child->sighand->siglock);
child->blocked = new_set;
spin_unlock_irq(&child->sighand->siglock);
clear_tsk_restore_sigmask(child); clear_tsk_restore_sigmask(child);
ret = 0;
break; break;
} }
...@@ -1217,7 +1211,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, ...@@ -1217,7 +1211,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
{ {
compat_ulong_t __user *datap = compat_ptr(data); compat_ulong_t __user *datap = compat_ptr(data);
compat_ulong_t word; compat_ulong_t word;
sigset_t new_set;
siginfo_t siginfo; siginfo_t siginfo;
int ret; int ret;
...@@ -1258,24 +1251,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, ...@@ -1258,24 +1251,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
else else
ret = ptrace_setsiginfo(child, &siginfo); ret = ptrace_setsiginfo(child, &siginfo);
break; break;
case PTRACE_GETSIGMASK:
if (addr != sizeof(compat_sigset_t))
return -EINVAL;
ret = put_compat_sigset((compat_sigset_t __user *) datap,
&child->blocked, sizeof(compat_sigset_t));
break;
case PTRACE_SETSIGMASK:
if (addr != sizeof(compat_sigset_t))
return -EINVAL;
ret = get_compat_sigset(&new_set,
(compat_sigset_t __user *) datap);
if (ret)
break;
ret = ptrace_setsigmask(child, &new_set);
break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
case PTRACE_GETREGSET: case PTRACE_GETREGSET:
case PTRACE_SETREGSET: case PTRACE_SETREGSET:
......
...@@ -38,11 +38,6 @@ cat << EOF ...@@ -38,11 +38,6 @@ cat << EOF
#define __IGNORE_lstat64 /* fstatat64 */ #define __IGNORE_lstat64 /* fstatat64 */
#endif #endif
#ifndef __ARCH_WANT_SET_GET_RLIMIT
#define __IGNORE_getrlimit /* getrlimit */
#define __IGNORE_setrlimit /* setrlimit */
#endif
/* Missing flags argument */ /* Missing flags argument */
#define __IGNORE_renameat /* renameat2 */ #define __IGNORE_renameat /* renameat2 */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册